forked from mindspore-Ecosystem/mindspore
!49540 fix input type bug of logical_not
Merge pull request !49540 from wtcheng/master
This commit is contained in:
commit
aa2b53887d
|
@ -3,6 +3,6 @@
|
|||
|
||||
.. py:class:: mindspore.ops.LogicalNot
|
||||
|
||||
逐元素计算两个Tensor的逻辑非运算。
|
||||
逐元素计算一个Tensor的逻辑非运算。
|
||||
|
||||
详情请查看 :func:`mindspore.ops.logical_not` 。
|
||||
|
|
|
@ -3,7 +3,7 @@ mindspore.ops.logical_not
|
|||
|
||||
.. py:function:: mindspore.ops.logical_not(x)
|
||||
|
||||
逐元素计算两个Tensor的逻辑非运算。
|
||||
逐元素计算一个Tensor的逻辑非运算。
|
||||
|
||||
.. math::
|
||||
out_{i} = \neg x_{i}
|
||||
|
|
|
@ -54,34 +54,8 @@ bool LogicalNotGpuKernelMod::LaunchKernel(const std::vector<AddressPtr> &inputs,
|
|||
// GetFuncList: returns the static registration table for the LogicalNot GPU
// kernel. Each entry pairs a KernelAttr — exactly one input dtype, with the
// output always registered as kNumberTypeBool — with the LaunchKernel<T>
// template instantiation that handles that input dtype.
// NOTE(review): this text is a rendered commit-diff page (hunk "@ -54,34 +54,8");
// the interleaved "|" / "||||" lines are table-cell residue from the page, not
// code. With the +/- markers stripped it is ambiguous which dtype rows survive
// the commit — the commit title ("fix input type bug of logical_not") and the
// companion test change suggest the list was narrowed, but confirm against the
// repository before relying on this table.
const std::vector<std::pair<KernelAttr, LogicalNotGpuKernelMod::KernelRunFunc>> &LogicalNotGpuKernelMod::GetFuncList()
|
||||
const {
|
||||
// Built once (function-local static) and returned by reference on every call.
static const std::vector<std::pair<KernelAttr, LogicalNotGpuKernelMod::KernelRunFunc>> func_list = {
|
||||
{KernelAttr().AddInputAttr(kNumberTypeFloat64).AddOutputAttr(kNumberTypeBool),
|
||||
&LogicalNotGpuKernelMod::LaunchKernel<double>},
|
||||
{KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeBool),
|
||||
&LogicalNotGpuKernelMod::LaunchKernel<float>},
|
||||
{KernelAttr().AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeBool),
|
||||
&LogicalNotGpuKernelMod::LaunchKernel<half>},
|
||||
{KernelAttr().AddInputAttr(kNumberTypeBool).AddOutputAttr(kNumberTypeBool),
|
||||
&LogicalNotGpuKernelMod::LaunchKernel<bool>},
|
||||
{KernelAttr().AddInputAttr(kNumberTypeInt8).AddOutputAttr(kNumberTypeBool),
|
||||
&LogicalNotGpuKernelMod::LaunchKernel<int8_t>},
|
||||
{KernelAttr().AddInputAttr(kNumberTypeInt16).AddOutputAttr(kNumberTypeBool),
|
||||
&LogicalNotGpuKernelMod::LaunchKernel<int16_t>},
|
||||
{KernelAttr().AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeBool),
|
||||
&LogicalNotGpuKernelMod::LaunchKernel<int32_t>},
|
||||
{KernelAttr().AddInputAttr(kNumberTypeInt64).AddOutputAttr(kNumberTypeBool),
|
||||
&LogicalNotGpuKernelMod::LaunchKernel<int64_t>},
|
||||
{KernelAttr().AddInputAttr(kNumberTypeUInt8).AddOutputAttr(kNumberTypeBool),
|
||||
&LogicalNotGpuKernelMod::LaunchKernel<uint8_t>},
|
||||
{KernelAttr().AddInputAttr(kNumberTypeUInt16).AddOutputAttr(kNumberTypeBool),
|
||||
&LogicalNotGpuKernelMod::LaunchKernel<uint16_t>},
|
||||
{KernelAttr().AddInputAttr(kNumberTypeUInt32).AddOutputAttr(kNumberTypeBool),
|
||||
&LogicalNotGpuKernelMod::LaunchKernel<uint32_t>},
|
||||
{KernelAttr().AddInputAttr(kNumberTypeUInt64).AddOutputAttr(kNumberTypeBool),
|
||||
&LogicalNotGpuKernelMod::LaunchKernel<uint64_t>},
|
||||
{KernelAttr().AddInputAttr(kNumberTypeComplex64).AddOutputAttr(kNumberTypeBool),
|
||||
&LogicalNotGpuKernelMod::LaunchKernel<utils::Complex<float>>},
|
||||
{KernelAttr().AddInputAttr(kNumberTypeComplex128).AddOutputAttr(kNumberTypeBool),
|
||||
&LogicalNotGpuKernelMod::LaunchKernel<utils::Complex<double>>},
|
||||
};
|
||||
return func_list;
|
||||
}
|
||||
|
|
|
@ -87,10 +87,7 @@ def test_logicalor():
|
|||
@pytest.mark.level0
|
||||
@pytest.mark.platform_x86_gpu_training
|
||||
@pytest.mark.env_onecard
|
||||
@pytest.mark.parametrize("dtype", [np.bool_, np.int8, np.int16, np.int32, np.int64,
|
||||
np.uint8, np.uint16, np.uint32, np.uint64,
|
||||
np.float16, np.float32, np.float64,
|
||||
np.complex64, np.complex128])
|
||||
@pytest.mark.parametrize("dtype", [np.bool_])
|
||||
@pytest.mark.parametrize("mode", [context.GRAPH_MODE, context.PYNATIVE_MODE])
|
||||
def test_logicalnot(dtype, mode):
|
||||
"""
|
||||
|
|
Loading…
Reference in New Issue