Update normalization description at nn/layer/normalization.py

This commit is contained in:
dinglinhe 2021-04-20 10:37:25 +08:00
parent ba5b751418
commit 54fb72e2b3
14 changed files with 25 additions and 25 deletions

View File

@@ -564,7 +564,7 @@ std::string Execute::AippCfgGenerator() {
std::vector<uint32_t> aipp_size = AippSizeFilter(resize_paras, crop_paras);
-// Process normalization parameters to find out the final normalization parameters for Aipp module
+// Process Normalization parameters to find out the final Normalization parameters for Aipp module
std::vector<uint32_t> normalize_paras;
if (info_->aipp_cfg_.find(vision::kDvppNormalizeOperation) != info_->aipp_cfg_.end()) {
for (auto pos = info_->aipp_cfg_.equal_range(vision::kDvppNormalizeOperation); pos.first != pos.second;

View File

@@ -39,7 +39,7 @@ NormalizeOp::NormalizeOp(float mean_r, float mean_g, float mean_b, float std_r,
Status NormalizeOp::Compute(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) {
IO_CHECK(input, output);
-// Doing the normalization
+// Doing the Normalization
return Normalize(input, output, mean_, std_);
}

View File

@@ -37,7 +37,7 @@ NormalizePadOp::NormalizePadOp(float mean_r, float mean_g, float mean_b, float s
Status NormalizePadOp::Compute(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *output) {
IO_CHECK(input, output);
-// Doing the normalization + pad
+// Doing the Normalization + pad
return NormalizePad(input, output, mean_, std_, dtype_);
}

View File

@@ -27,7 +27,7 @@ __all__ = ["LessBN"]
class CommonHeadLastFN(Cell):
r"""
-The last full normalization layer.
+The last full Normalization layer.
This layer implements the operation as:

View File

@@ -538,12 +538,12 @@ class BatchNorm3d(Cell):
class GlobalBatchNorm(_BatchNorm):
r"""
-Global normalization layer over a N-dimension input.
+Global Batch Normalization layer over a N-dimension input.
-Global Normalization is cross device synchronized Batch Normalization. The implementation of Batch Normalization
-only normalizes the data within each device. Global normalization will normalize the input within the group.
-It has been described in the paper `Batch Normalization: Accelerating Deep Network Training by
-Reducing Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`_. It rescales and recenters the
+Global Batch Normalization is cross device synchronized Batch Normalization. The implementation of
+Batch Normalization only normalizes the data within each device. Global Normalization will normalize
+the input within the group.It has been described in the paper `Batch Normalization: Accelerating Deep Network
+Training by Reducing Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`_. It rescales and recenters the
feature using a mini-batch of data and the learned parameters which can be described in the following formula.
.. math::
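The formula this docstring refers to is truncated at the hunk boundary; for reference, the standard batch-normalization transform from the cited paper, with gamma and beta the learned scale and shift, is presumably along the lines of

    y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} \cdot \gamma + \beta

where the mean and variance are computed over the mini-batch (and, for GlobalBatchNorm, synchronized across the device group).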
@@ -1003,9 +1003,9 @@ class GroupNorm(Cell):
r"""
Group Normalization over a mini-batch of inputs.
-Group normalization is widely used in recurrent neural networks. It applies
+Group Normalization is widely used in recurrent neural networks. It applies
normalization on a mini-batch of inputs for each single training case as described
-in the paper `Group Normalization <https://arxiv.org/pdf/1803.08494.pdf>`_. Group normalization
+in the paper `Group Normalization <https://arxiv.org/pdf/1803.08494.pdf>`_. Group Normalization
divides the channels into groups and computes within each group the mean and variance for normalization,
and it performs very stable over a wide range of batch size. It can be described using the following formula.
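To make the description above concrete, here is a minimal NumPy sketch of group normalization on an (N, C, H, W) input; the layout, `eps`, and the per-channel affine parameters are assumptions for illustration, not the operator's exact implementation:

```python
import numpy as np

def group_norm(x, num_groups, gamma, beta, eps=1e-5):
    # x: (N, C, H, W); channels are split into num_groups groups and the
    # mean/variance are computed within each group, per sample.
    n, c, h, w = x.shape
    g = x.reshape(n, num_groups, c // num_groups, h, w)
    mean = g.mean(axis=(2, 3, 4), keepdims=True)
    var = g.var(axis=(2, 3, 4), keepdims=True)
    g = (g - mean) / np.sqrt(var + eps)
    # Learned per-channel scale and shift, as in the paper.
    return g.reshape(n, c, h, w) * gamma.reshape(1, c, 1, 1) + beta.reshape(1, c, 1, 1)
```

Because the statistics are per sample rather than per batch, the result is independent of batch size, which is why the docstring calls it stable over a wide range of batch sizes.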

View File

@@ -32,7 +32,7 @@ class ConfusionMatrix(Metric):
num_classes (int): Number of classes in the dataset.
normalize (str): The parameter of calculating ConfusionMatrix supports four Normalization modes, Choose from:
-    - **'no_norm'** (None) - No normalization is used. Default: None.
+    - **'no_norm'** (None) - No Normalization is used. Default: None.
- **'target'** (str) - Normalization based on target value.
- **'prediction'** (str) - Normalization based on predicted value.
- **'all'** (str) - Normalization over the whole matrix.
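A minimal NumPy sketch of what these four modes compute, assuming rows index targets and columns index predictions (the axis convention here is an assumption):

```python
import numpy as np

cm = np.array([[5, 1], [2, 4]], dtype=float)     # rows: target, cols: prediction (assumed)
no_norm = cm                                     # 'no_norm': raw counts
target = cm / cm.sum(axis=1, keepdims=True)      # 'target': each row sums to 1
prediction = cm / cm.sum(axis=0, keepdims=True)  # 'prediction': each column sums to 1
all_norm = cm / cm.sum()                         # 'all': whole matrix sums to 1
```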

View File

@@ -2300,9 +2300,9 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, aweights=N
a variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows contain
observations.
-bias (bool, optional): Default normalization (``False``) is by :math:`(N - 1)`, where
+bias (bool, optional): Default Normalization (``False``) is by :math:`(N - 1)`, where
:math:`N` is the number of observations given (unbiased estimate). If bias is
-``True``, then normalization is by `N`. These values can be overridden by
+``True``, then Normalization is by `N`. These values can be overridden by
using the keyword `ddof`.
ddof (int, optional): If not ``None``, the default value implied by `bias` is
overridden. Note that :math:`ddof=1` will return the unbiased estimate, even
@@ -2364,7 +2364,7 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, aweights=N
avg = average(m, axis=1, weights=w)
-# Determine the normalization
+# Determine the Normalization
if w is None:
fact = m.shape[1] - ddof
else:
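The interplay of `bias` and `ddof` described above can be checked against NumPy's `cov`, whose convention this function mirrors:

```python
import numpy as np

m = np.array([[1.0, 2.0, 3.0],
              [2.0, 4.0, 1.0]])  # 2 variables, N = 3 observations
np.cov(m)                        # default: divide by N - 1 = 2 (unbiased)
np.cov(m, bias=True)             # divide by N = 3
np.cov(m, ddof=0)                # ddof overrides bias: also divides by N
np.cov(m, ddof=1)                # unbiased again, regardless of bias
```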

View File

@@ -1141,7 +1141,7 @@ class L2NormalizeGrad(PrimitiveWithInfer):
class LayerNormGrad(Primitive):
"""
-Applies the layer normalization to the input array.
+Applies the layer Normalization to the input array.
This operator will calculate the input gradients of layernorm.

View File

@@ -816,7 +816,7 @@ class FusedBatchNormEx(PrimitiveWithCheck):
class InstanceNorm(PrimitiveWithInfer):
r"""
-Instance normalization over a 4D input.
+Instance Normalization over a 4D input.
This operator applies Instance Normalization over a 4D input (a mini-batch of 2D inputs with
additional channel dimension) as described in the paper `Instance Normalization: The Missing Ingredient for
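For comparison with the batch and group variants earlier in this commit, a NumPy sketch of instance normalization on a 4D input; statistics are taken per sample and per channel, and `eps` plus the affine parameters are assumptions:

```python
import numpy as np

def instance_norm(x, gamma, beta, eps=1e-5):
    # x: (N, C, H, W); mean/variance over the spatial axes only, so each
    # (sample, channel) slice is normalized independently of the batch.
    mean = x.mean(axis=(2, 3), keepdims=True)
    var = x.var(axis=(2, 3), keepdims=True)
    x_hat = (x - mean) / np.sqrt(var + eps)
    return x_hat * gamma.reshape(1, -1, 1, 1) + beta.reshape(1, -1, 1, 1)
```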

View File

@@ -74,7 +74,7 @@ class CRF(nn.Cell):
def log_sum_exp(self, logits):
'''
-Compute the log_sum_exp score for normalization factor.
+Compute the log_sum_exp score for Normalization factor.
'''
max_score = self.reduce_max(logits, -1) #16 5 5
score = self.log(self.reduce_sum(self.exp(logits - max_score), -1))
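The max-subtraction in this snippet is the standard numerically stable log-sum-exp trick: subtracting the maximum before exponentiating prevents overflow, and adding it back afterwards leaves the result unchanged. Note it relies on the reduction keeping its axis so the broadcast lines up. A NumPy sketch:

```python
import numpy as np

def log_sum_exp(logits):
    # Shift by the max so exp() stays in a safe range.
    max_score = logits.max(axis=-1, keepdims=True)
    score = np.log(np.exp(logits - max_score).sum(axis=-1, keepdims=True))
    # Add the shift back; the result equals log(sum(exp(logits))).
    return (max_score + score).squeeze(-1)
```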

View File

@@ -86,7 +86,7 @@ class ASRDataset(LoadAudioAndTranscript):
audio_conf: Config containing the sample rate, window and the window length/stride in seconds
manifest_filepath (str): manifest_file path.
labels (list): List containing all the possible characters to map to
-normalize: Apply standard mean and deviation normalization to audio tensor
+normalize: Apply standard mean and deviation Normalization to audio tensor
batch_size (int): Dataset batch size (default=32)
"""
def __init__(self, audio_conf=None,
@@ -195,7 +195,7 @@ def create_dataset(audio_conf, manifest_filepath, labels, normalize, batch_size,
audio_conf: Config containing the sample rate, window and the window length/stride in seconds
manifest_filepath (str): manifest_file path.
labels (list): list containing all the possible characters to map to
-normalize: Apply standard mean and deviation normalization to audio tensor
+normalize: Apply standard mean and deviation Normalization to audio tensor
train_mode (bool): Whether dataset is use for train or eval (default=True).
batch_size (int): Dataset batch size
rank (int): The shard ID within num_shards (default=None).
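The `normalize` flag described in both docstrings amounts to standard mean/variance normalization of the audio tensor; a sketch, where the per-utterance statistics and the epsilon guard are assumptions:

```python
import numpy as np

def normalize_spectrogram(spect, eps=1e-10):
    # Zero-mean, unit-variance normalization of the audio tensor.
    mean = spect.mean()
    std = spect.std()
    return (spect - mean) / (std + eps)
```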

View File

@@ -75,13 +75,13 @@ Dataset used: [The LJ Speech Dataset](<https://keithito.com/LJ-Speech-Dataset>)
├──egs // Note the egs folder should be downloaded from the above link
├──utils // Note the utils folder should be downloaded from the above link
├── audio.py // Audio utils. Note this script should be downloaded from the above link
-├── compute-meanvar-stats.py // Compute mean-variance normalization stats. Note this script should be downloaded from the above link
+├── compute-meanvar-stats.py // Compute mean-variance Normalization stats. Note this script should be downloaded from the above link
├── evaluate.py // Evaluation
├── export.py // Convert mindspore model to air model
├── hparams.py // Hyper-parameter configuration. Note this script should be downloaded from the above link
├── mksubset.py // Make subset of dataset. Note this script should be downloaded from the above link
├── preprocess.py // Preprocess dataset. Note this script should be downloaded from the above link
-├── preprocess_normalize.py // Perform meanvar normalization to preprocessed features. Note this script should be downloaded from the above link
+├── preprocess_normalize.py // Perform meanvar Normalization to preprocessed features. Note this script should be downloaded from the above link
├── README.md // Descriptions about WaveNet
├── train.py // Training scripts
├── train_pytorch.py // Note this script should be downloaded from the above link. The initial name of this script is train.py in the project from the link

View File

@@ -30,7 +30,7 @@ class CRF(nn.Cell):
Args:
tag_to_index: The dict for tag to index mapping with extra "<START>" and "<STOP>"sign.
batch_size: Batch size, i.e., the length of the first dimension.
-seq_length: Sequence length, i.e., the length of the second dimention.
+seq_length: Sequence length, i.e., the length of the second dimension.
is_training: Specifies whether to use training mode.
Returns:
Training mode: Tensor, total loss.
@@ -74,7 +74,7 @@ class CRF(nn.Cell):
def log_sum_exp(self, logits):
'''
-Compute the log_sum_exp score for normalization factor.
+Compute the log_sum_exp score for Normalization factor.
'''
max_score = self.reduce_max(logits, -1) #16 5 5
score = self.log(self.reduce_sum(self.exp(logits - max_score), -1))

View File

@@ -31,7 +31,7 @@ GENERATE_GOLDEN = False
def normalize_np(image, mean, std):
"""
-Apply the normalization
+Apply the Normalization
"""
# DE decodes the image in RGB by default, hence
# the values here are in RGB
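The helper's body is cut off by the hunk; per-channel mean/std normalization in RGB order would look roughly like this (a sketch, not necessarily the repository's exact code):

```python
import numpy as np

def normalize_np(image, mean, std):
    """Apply the normalization (sketch)."""
    # DE decodes the image in RGB by default, so mean and std
    # are given in RGB order and broadcast over the last axis.
    image = np.array(image, np.float32)
    return (image - np.array(mean, np.float32)) / np.array(std, np.float32)
```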