Add ReverseV2 data types and test cases

This commit is contained in:
huangyong 2022-10-20 14:40:36 +08:00
parent b3da2ebeb5
commit cdd9e64d67
4 changed files with 89 additions and 0 deletions

View File

@ -221,6 +221,7 @@ Array Methods
mindspore.Tensor.reshape
mindspore.Tensor.resize
mindspore.Tensor.reverse
mindspore.Tensor.reverse_sequence
mindspore.Tensor.scatter_add
mindspore.Tensor.scatter_div
mindspore.Tensor.scatter_max

View File

@ -131,9 +131,18 @@ std::vector<std::pair<KernelAttr, ReverseV2GpuKernelMod::ReverseV2LaunchFunc>> R
{KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
&ReverseV2GpuKernelMod::LaunchKernel<float>},
{KernelAttr().AddInputAttr(kNumberTypeFloat64).AddOutputAttr(kNumberTypeFloat64),
&ReverseV2GpuKernelMod::LaunchKernel<double>},
{KernelAttr().AddInputAttr(kNumberTypeUInt8).AddOutputAttr(kNumberTypeUInt8),
&ReverseV2GpuKernelMod::LaunchKernel<uint8_t>},
{KernelAttr().AddInputAttr(kNumberTypeUInt16).AddOutputAttr(kNumberTypeUInt16),
&ReverseV2GpuKernelMod::LaunchKernel<uint16_t>},
{KernelAttr().AddInputAttr(kNumberTypeInt8).AddOutputAttr(kNumberTypeInt8),
&ReverseV2GpuKernelMod::LaunchKernel<int8_t>},
{KernelAttr().AddInputAttr(kNumberTypeInt16).AddOutputAttr(kNumberTypeInt16),
&ReverseV2GpuKernelMod::LaunchKernel<int16_t>},

View File

@ -63,10 +63,22 @@ template CUDA_LIB_EXPORT void CalReverseV2<float>(const float* input, float* out
const int64_t* strides, const int64_t* axis, size_t input_size,
size_t axis_size, cudaStream_t cuda_stream);
// Explicit instantiations of CalReverseV2 for each supported element type, so the
// kernel is compiled here and can be linked from other translation units.
// NOTE(review): uint8/uint16/int8/int16 are the instantiations added by this change;
// they presumably mirror the dtype list registered in the GPU kernel mod — confirm.
template CUDA_LIB_EXPORT void CalReverseV2<double>(const double* input, double* output, const size_t* input_shape,
const int64_t* strides, const int64_t* axis, size_t input_size,
size_t axis_size, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalReverseV2<uint8_t>(const uint8_t* input, uint8_t* output, const size_t* input_shape,
const int64_t* strides, const int64_t* axis, size_t input_size,
size_t axis_size, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalReverseV2<uint16_t>(const uint16_t* input, uint16_t* output, const size_t* input_shape,
const int64_t* strides, const int64_t* axis, size_t input_size,
size_t axis_size, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalReverseV2<int8_t>(const int8_t* input, int8_t* output, const size_t* input_shape,
const int64_t* strides, const int64_t* axis, size_t input_size,
size_t axis_size, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalReverseV2<int16_t>(const int16_t* input, int16_t* output, const size_t* input_shape,
const int64_t* strides, const int64_t* axis, size_t input_size,
size_t axis_size, cudaStream_t cuda_stream);

View File

@ -17,6 +17,7 @@ import pytest
import mindspore.context as context
import mindspore.nn as nn
import mindspore as ms
from mindspore import Tensor
from mindspore.ops import operations as P
@ -69,6 +70,18 @@ def test_reverse_v2_float32():
reverse_v2_1d(np.float32)
reverse_v2_3d(np.float32)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_reverse_v2_float64():
    """
    Feature: ReverseV2 GPU kernel with float64 input.
    Description: feed input_x and axis through the 1-D and 3-D cases and check the output value.
    Expectation: the result matches the expected reversed array.
    """
    for run_case in (reverse_v2_1d, reverse_v2_3d):
        run_case(np.float64)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@ -76,6 +89,30 @@ def test_reverse_v2_uint8():
reverse_v2_1d(np.uint8)
reverse_v2_3d(np.uint8)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_reverse_v2_uint16():
    """
    Feature: ReverseV2 GPU kernel with uint16 input.
    Description: feed input_x and axis through the 1-D and 3-D cases and check the output value.
    Expectation: the result matches the expected reversed array.
    """
    for run_case in (reverse_v2_1d, reverse_v2_3d):
        run_case(np.uint16)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_reverse_v2_int8():
    """
    Feature: ReverseV2 GPU kernel with int8 input.
    Description: feed input_x and axis through the 1-D and 3-D cases and check the output value.
    Expectation: the result matches the expected reversed array.
    """
    for run_case in (reverse_v2_1d, reverse_v2_3d):
        run_case(np.int8)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@ -113,3 +150,33 @@ def test_reverse_v2_invalid_axis():
reverse_v2_net = ReverseV2Net((-2, -1, 3))
_ = reverse_v2_net(x)
assert "'axis' cannot contain duplicate dimensions" in str(info.value)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_reverse_v2_tensor_api():
    """
    Feature: ReverseV2 GPU operation via the Tensor method.
    Description: reverse a 2-D int32 tensor along axis 1 and check the output value.
    Expectation: the values match the predefined values.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    data = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
    result = Tensor(data, ms.int32).reverse(axis=[1])
    # Reversing along axis 1 flips each row, which numpy expresses as [:, ::-1].
    expected = data[:, ::-1].astype(np.int32)
    assert np.array_equal(result.asnumpy(), expected)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_reverse_v2_functional_api():
    """
    Feature: ReverseV2 GPU operation via the functional API.
    Description: reverse a 2-D int32 tensor along axis 1 and check the output value.
    Expectation: the values match the predefined values.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    data = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
    result = ms.ops.reverse(Tensor(data, ms.int32), axis=[1])
    # Reversing along axis 1 flips each row, which numpy expresses as [:, ::-1].
    expected = data[:, ::-1].astype(np.int32)
    assert np.array_equal(result.asnumpy(), expected)