!44451 [ST][MS][OPS] ops.bartlett_window & ops.mse_loss APIs and STs.

Merge pull request !44451 from alashkari/new-apis-oct-24
This commit is contained in:
i-robot 2022-10-31 03:02:53 +00:00 committed by Gitee
commit 1b59e7d834
No known key found for this signature in database
GPG Key ID: 173E9B9CA92EEF8F
10 changed files with 380 additions and 5 deletions

View File

@ -62,6 +62,7 @@ mindspore.ops.function
mindspore.ops.binary_cross_entropy_with_logits
mindspore.ops.cross_entropy
mindspore.ops.hinge_embedding_loss
mindspore.ops.mse_loss
mindspore.ops.nll_loss
mindspore.ops.smooth_l1_loss
@ -497,6 +498,7 @@ Parameter操作函数
:nosignatures:
:template: classtemplate.rst
mindspore.ops.bartlett_window
mindspore.ops.blackman_window
其他函数

View File

@ -63,6 +63,7 @@ Loss Functions
mindspore.ops.binary_cross_entropy_with_logits
mindspore.ops.cross_entropy
mindspore.ops.hinge_embedding_loss
mindspore.ops.mse_loss
mindspore.ops.nll_loss
mindspore.ops.smooth_l1_loss
@ -497,6 +498,7 @@ Spectral Functions
:nosignatures:
:template: classtemplate.rst
mindspore.ops.bartlett_window
mindspore.ops.blackman_window
Other Functions

View File

@ -7053,6 +7053,8 @@ class Tensor(Tensor_):
of `index` should be in [0, b), where the b is the size of input tensor in the `dim` dimension.
source (Tensor): The input tensor with the value to add. Must have same data type as input tensor.
The shape must be the same as input tensor except the `dim` th dimension.
Keyword args:
alpha (number.Number): the scalar multiplier for `source`. Default: 1.
Returns:

View File

@ -354,6 +354,7 @@ from .nn_func import (
hinge_embedding_loss,
lp_pool1d,
lp_pool2d,
mse_loss,
)
from .linalg_func import (
svd,
@ -428,7 +429,8 @@ from .image_func import (
crop_and_resize
)
# Import spectral-window functional APIs (the rendered diff had left the stale
# pre-change line `blackman_window` next to the new `blackman_window,`, which
# is invalid syntax; this is the intended post-merge import list).
from .spectral_func import (
    blackman_window,
    bartlett_window,
)
__all__ = []

View File

@ -4186,6 +4186,70 @@ def lp_pool2d(x, norm_type, kernel_size, stride=None, ceil_mode=False):
return ((sign(out) * ops.relu(ops.abs(out))) * (kw * kh)).pow(1.0 / norm_type)
def mse_loss(input_x, target, reduction='mean'):
    r"""
    Calculates the mean squared error between the predicted value and the label value.

    For detailed information, please refer to :class:`mindspore.nn.MSELoss`.

    Args:
        input_x (Tensor): Tensor of any dimension.
        target (Tensor): The input label. Tensor of any dimension, same shape as the `input_x`
            in common cases. However, it supports that the shape of `input_x` is different
            from the shape of `target` and they should be broadcasted to each other.
        reduction (str): Type of reduction to be applied to loss.
            The optional values are "mean", "none" and "sum". Default: "mean".

    Returns:
        Tensor, loss of type float. The output is a 0-dimensional Tensor if `reduction` is
        'mean' or 'sum', while the shape of the output is the broadcasted shape if
        `reduction` is 'none'.

    Raises:
        TypeError: If `input_x` or `target` is not a Tensor.
        ValueError: If `reduction` is not one of 'none', 'mean' or 'sum'.
        ValueError: If `input_x` and `target` have different shapes and cannot be broadcasted.

    Supported Platforms:
        ``Ascend`` ``CPU`` ``GPU``

    Examples:
        >>> logits = Tensor(np.array([1, 2, 3]), mindspore.float32)
        >>> labels = Tensor(np.array([[1, 1, 1], [1, 2, 2]]), mindspore.float32)
        >>> output = ops.mse_loss(logits, labels, reduction='none')
        >>> print(output)
        [[0. 1. 4.]
         [0. 0. 1.]]
    """
    if not isinstance(input_x, (Tensor, Tensor_)):
        raise TypeError("For ops.mse_loss, the `input_x` must be tensor")
    if not isinstance(target, (Tensor, Tensor_)):
        raise TypeError("For ops.mse_loss, the `target` must be tensor")
    if reduction not in ['mean', 'none', 'sum']:
        raise ValueError("For ops.mse_loss, `reduction` value should be either 'mean', 'none' or 'sum'.")

    # Element-wise squared error. The intermediate is cast to float32 before the
    # reduction and cast back to the original dtype on return — presumably to
    # keep the reduction in a wider type; verify if low-precision inputs matter.
    x = _get_cache_prim(P.Square)()(input_x - target)
    input_dtype = x.dtype
    x = _get_cache_prim(P.Cast)()(x, mstype.float32)

    if reduction != 'none':
        # Reduce over every axis of the (possibly broadcasted) squared error.
        # Only built when needed, so 'none' skips the Range computation.
        axes = _get_cache_prim(P.Range)()(Tensor(0, mstype.int32),
                                          Tensor(len(x.shape), mstype.int32),
                                          Tensor(1, mstype.int32))
        if reduction == 'mean':
            x = _get_cache_prim(P.ReduceMean)()(x, axes)
        else:
            x = _get_cache_prim(P.ReduceSum)()(x, axes)
    return _get_cache_prim(P.Cast)()(x, input_dtype)
__all__ = [
'adaptive_avg_pool1d',
'adaptive_avg_pool2d',
@ -4248,5 +4312,6 @@ __all__ = [
'max_unpool1d',
'max_unpool2d',
'max_unpool3d',
'mse_loss',
]
__all__.sort()

View File

@ -20,7 +20,7 @@ from mindspore.common import dtype as mstype
from .._primitive_cache import _get_cache_prim
def blackman_window(window_length, periodic=True, dtype=mstype.float32):
def blackman_window(window_length, periodic=True, *, dtype=None):
r"""
Blackman window function.
@ -42,11 +42,14 @@ def blackman_window(window_length, periodic=True, dtype=mstype.float32):
The input data should be an integer with a value of [0, 1000000].
periodic (bool): If True, returns a window to be used as periodic function.
If False, return a symmetric window. Default: True.
dtype (mindspore.dtype): the desired data type of returned tensor. Only float16, float32 and float64 is allowed.
Default: mindspore.float32.
Keyword args:
dtype (mindspore.dtype): the desired data type of returned tensor.
Only float16, float32 and float64 is allowed. Default: None.
Returns:
A 1-D tensor of size `window_length` containing the window. Its datatype is set by the attr `dtype`.
If 'dtype' is None, output datatype is float32.
Raises:
TypeError: If `window_length` is not a Tensor.
@ -67,13 +70,74 @@ def blackman_window(window_length, periodic=True, dtype=mstype.float32):
8.4922993e-01 1.0000000e+00 8.4922981e-01 5.0978690e-01
2.0077008e-01 4.0212870e-02]
"""
if dtype is None:
dtype = mstype.float32
blackman_window_op = _get_cache_prim(P.BlackmanWindow)(periodic, dtype)
return blackman_window_op(window_length)
def bartlett_window(window_length, periodic=True, *, dtype=None):
    r"""
    Bartlett window function.

    The input `window_length` is a tensor whose datatype must be an integer, which controls
    the returned window size. In particular, if `window_length` = 1, the returned window
    contains a single value 1.

    Attr `periodic` determines whether the returned window trims off the last duplicate value
    from the symmetric window and is ready to be used as a periodic window with functions.
    Therefore, if attr `periodic` is True, the "N" in the formula is in fact
    `window_length` + 1.

    .. math::

        w[n] = 1 - \left| \frac{2n}{N-1} - 1 \right| = \begin{cases}
        \frac{2n}{N - 1} & \text{if } 0 \leq n \leq \frac{N - 1}{2} \\
        2 - \frac{2n}{N - 1} & \text{if } \frac{N - 1}{2} < n < N \\
        \end{cases},

        \text{where : N is the full window size.}

    Args:
        window_length (Tensor): The size of the returned window, with data type int32, int64.
            The input data should be an integer with a value of [0, 1000000].
        periodic (bool): If True, returns a window to be used as periodic function.
            If False, return a symmetric window. Default: True.

    Keyword args:
        dtype (mindspore.dtype): The desired datatype of returned tensor.
            Only float16, float32 and float64 are allowed. Default: None.

    Returns:
        A 1-D tensor of size `window_length` containing the window. Its datatype is set by
        the attr `dtype`. If `dtype` is None, output datatype is float32.

    Raises:
        TypeError: If `window_length` is not a Tensor.
        TypeError: If the type of `window_length` is not one of: int32, int64.
        TypeError: If `periodic` is not a bool.
        TypeError: If `dtype` is not one of: float16, float32, float64.
        ValueError: If the value range of `window_length` is not [0,1000000].
        ValueError: If the dimension of `window_length` is not 0.

    Supported Platforms:
        ``GPU``

    Examples:
        >>> window_length = Tensor(5, mstype.int32)
        >>> output = ops.bartlett_window(window_length, periodic=True, dtype=mstype.float32)
        >>> print(output)
        [0. 0.4 0.8 0.8 0.4]
    """
    # None means "use the documented default of float32".
    if dtype is None:
        dtype = mstype.float32
    bartlett_window_op = _get_cache_prim(P.BartlettWindow)(periodic, dtype)
    return bartlett_window_op(window_length)
# Public API of this module. (The rendered diff had left the stale pre-change
# line `'blackman_window'` without a comma next to the new line, which would
# implicitly concatenate the two strings; this is the intended list.)
__all__ = [
    'blackman_window',
    'bartlett_window',
]
__all__.sort()

View File

@ -0,0 +1,41 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
import numpy as np
from mindspore import Tensor
import mindspore.context as context
from mindspore.ops import functional as F
from mindspore.common import dtype as mstype
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_mse_loss_functional_api_modes(mode):
    """
    Feature: Test mse_loss functional api.
    Description: Run mse_loss on Ascend in both Graph and PyNative modes.
    Expectation: The element-wise loss matches the precomputed values.
    """
    context.set_context(mode=mode, device_target="Ascend")
    predictions = Tensor([1, 2, 3], mstype.float32)
    targets = Tensor([[1, 1, 1], [1, 2, 2]], mstype.float32)
    loss = F.mse_loss(predictions, targets, reduction='none')
    expected_loss = np.array([[0., 1., 4.], [0., 0., 1.]], np.float32)
    np.testing.assert_array_equal(loss.asnumpy(), expected_loss)

View File

@ -0,0 +1,40 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
import numpy as np
from mindspore import Tensor
import mindspore.context as context
from mindspore.ops import functional as F
from mindspore.common import dtype as mstype
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_mse_loss_functional_api_modes(mode):
    """
    Feature: Test mse_loss functional api.
    Description: Run mse_loss on CPU in both Graph and PyNative modes.
    Expectation: The element-wise loss matches the precomputed values.
    """
    context.set_context(mode=mode, device_target="CPU")
    predictions = Tensor([1, 2, 3], mstype.float32)
    targets = Tensor([[1, 1, 1], [1, 2, 2]], mstype.float32)
    loss = F.mse_loss(predictions, targets, reduction='none')
    expected_loss = np.array([[0., 1., 4.], [0., 0., 1.]], np.float32)
    np.testing.assert_array_equal(loss.asnumpy(), expected_loss)

View File

@ -0,0 +1,117 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import torch
import pytest
import mindspore.context as context
import mindspore.nn as nn
import mindspore.ops.operations.other_ops as P
from mindspore import Tensor
from mindspore.common import dtype as mstype
from mindspore.common.api import jit
from mindspore.ops import functional as F
class BartlettWindowNet(nn.Cell):
    """Thin Cell wrapper running the BartlettWindow primitive under @jit."""

    def __init__(self, periodic=True, dtype=mstype.float32):
        super(BartlettWindowNet, self).__init__()
        # Primitive is built once; periodic/dtype are fixed at construction.
        self.bartlett_op = P.BartlettWindow(periodic=periodic, dtype=dtype)

    @jit
    def construct(self, input_x):
        return self.bartlett_op(input_x)
def get_dtype(dtype="float16"):
    """Map a dtype name to the matching (numpy, mindspore, torch) dtype triple.

    Args:
        dtype (str): One of "float16", "float32" or "float64".

    Returns:
        tuple: (numpy dtype, mindspore dtype, torch dtype). For "float16" the
        torch dtype is float32 — the torch reference window is computed in
        float32 and cast to float16 via numpy afterwards (presumably for
        precision of the reference; confirm against the comparison code).

    Raises:
        ValueError: If `dtype` is not one of the supported names. (The original
            code only printed a message and then failed with UnboundLocalError
            on the return statement.)
    """
    if dtype == "float16":
        nptype = np.float16
        msptype = mstype.float16
        pttype = torch.float32
    elif dtype == "float32":
        nptype = np.float32
        msptype = mstype.float32
        pttype = torch.float32
    elif dtype == "float64":
        nptype = np.float64
        msptype = mstype.float64
        pttype = torch.float64
    else:
        raise ValueError("The attr 'dtype' must in [float16, float32, float64]")
    return nptype, msptype, pttype
def bartlett_window(periodic, dtype, loss):
    """Compare graph-mode BartlettWindow (int32 input) against torch on GPU."""
    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
    nptype, msptype, pttype = get_dtype(dtype)
    window_length_np = np.array(200, dtype=np.int32)
    ms_output = BartlettWindowNet(periodic, msptype)(Tensor(window_length_np))
    torch_output = torch.bartlett_window(torch.tensor(window_length_np),
                                         periodic=periodic, dtype=pttype)
    expected = torch_output.numpy().astype(nptype)
    assert np.allclose(ms_output.asnumpy(), expected, loss, loss)
def bartlett_window_pynative(periodic, dtype, loss):
    """Compare pynative-mode BartlettWindow (int64 input) against torch on GPU."""
    context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
    nptype, msptype, pttype = get_dtype(dtype)
    window_length_np = np.array(200, dtype=np.int64)
    ms_output = BartlettWindowNet(periodic, msptype)(Tensor(window_length_np))
    torch_output = torch.bartlett_window(torch.tensor(window_length_np),
                                         periodic=periodic, dtype=pttype)
    expected = torch_output.numpy().astype(nptype)
    assert np.allclose(ms_output.asnumpy(), expected, loss, loss)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_bartlett_window_graph_int32_true_float32():
    """
    Feature: ALL To ALL
    Description: graph-mode BartlettWindow, int32 input, periodic=True, float32.
    Expectation: the result match to torch
    """
    bartlett_window(True, "float32", 1.0e-4)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_bartlett_window_pynative_int64_false_float64():
    """
    Feature: ALL To ALL
    Description: pynative-mode BartlettWindow, int64 input, periodic=False, float64.
    Expectation: the result match to torch
    """
    bartlett_window_pynative(False, "float64", 1.0e-5)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_bartlett_window_functional_api(mode):
    """
    Feature: test bartlett_window functional api for PyNative and Graph modes.
    Description: run ops.bartlett_window on a length-5 periodic window.
    Expectation: the result match with expected result.
    """
    context.set_context(mode=mode, device_target="GPU")
    length = Tensor(5, mstype.int32)
    result = F.bartlett_window(length, periodic=True, dtype=mstype.float32)
    expected = np.array([0, 0.4, 0.8, 0.8, 0.4], np.float32)
    np.testing.assert_array_equal(result.asnumpy(), expected)

View File

@ -0,0 +1,40 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
import numpy as np
from mindspore import Tensor
import mindspore.context as context
from mindspore.ops import functional as F
from mindspore.common import dtype as mstype
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_mse_loss_functional_api_modes(mode):
    """
    Feature: Test mse_loss functional api.
    Description: Run mse_loss on GPU in both Graph and PyNative modes.
    Expectation: The element-wise loss matches the precomputed values.
    """
    context.set_context(mode=mode, device_target="GPU")
    predictions = Tensor([1, 2, 3], mstype.float32)
    targets = Tensor([[1, 1, 1], [1, 2, 2]], mstype.float32)
    loss = F.mse_loss(predictions, targets, reduction='none')
    expected_loss = np.array([[0., 1., 4.], [0., 0., 1.]], np.float32)
    np.testing.assert_array_equal(loss.asnumpy(), expected_loss)