forked from mindspore-Ecosystem/mindspore
!13335 Fix check of input dims for BiasAdd and attr of maxpool3d.
From: @liu_xiao_93 Reviewed-by: @liangchenghui Signed-off-by: @liangchenghui
Commit: 2545e2c5f1
@@ -26,6 +26,7 @@ transpose_d_op_info = TBERegOp("Transpose") \
     .attr("perm", "optional", "listInt", "all") \
     .input(0, "x", False, "required", "all") \
     .output(0, "y", False, "required", "all") \
+    .need_check_supported(True) \
     .dtype_format(DataType.BOOL_Default, DataType.BOOL_Default) \
     .dtype_format(DataType.I8_Default, DataType.I8_Default) \
     .dtype_format(DataType.U8_Default, DataType.U8_Default) \
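For context: `need_check_supported(True)` marks a TBE kernel as one whose `check_supported` routine must be queried before kernel selection, so permutations this kernel cannot handle can fall back to another implementation rather than fail later. A minimal sketch of the same registration pattern (the op name, binary file, and dtype list here are placeholders, not the real Transpose registration):

```python
from mindspore.ops.op_info_register import TBERegOp, DataType

# Placeholder registration illustrating the flag added above; only
# need_check_supported(True) is the point of interest here.
demo_op_info = TBERegOp("DemoTranspose") \
    .fusion_type("OPAQUE") \
    .async_flag(False) \
    .binfile_name("demo_transpose.so") \
    .compute_cost(10) \
    .kernel_name("demo_transpose") \
    .partial_flag(True) \
    .attr("perm", "optional", "listInt", "all") \
    .input(0, "x", False, "required", "all") \
    .output(0, "y", False, "required", "all") \
    .need_check_supported(True) \
    .dtype_format(DataType.F16_Default, DataType.F16_Default) \
    .get_op_info()
```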
@@ -1904,7 +1904,7 @@ class MaxPoolWithArgmax(_Pool):
 
 class MaxPool3D(PrimitiveWithInfer):
     r"""
-    Max pooling operation.
+    3D max pooling operation.
 
     Applies a 3D max pooling over an input Tensor which can be regarded as a composition of 3D planes.
 
@@ -1947,7 +1947,7 @@ class MaxPool3D(PrimitiveWithInfer):
         TypeError: If `pad_mode` or `data_format` is not a string.
         ValueError: If numbers in `kernel_size` or `strides` are not positive.
         ValueError: If `pad_mode` is not one of 'same', 'valid'.
-        ValueError: If `kernel_size` or `strides` is a tuple whose length is not equal to 3 or 5.
+        ValueError: If `kernel_size` or `strides` is a tuple whose length is not equal to 3.
         ValueError: If `data_format` is not 'NCDHW'.
 
     Supported Platforms:
@@ -1971,9 +1971,10 @@ class MaxPool3D(PrimitiveWithInfer):
         self.pad_mode = validator.check_string(pad_mode.upper(), ['VALID', 'SAME'], 'pad_mode', self.name)
         self.add_prim_attr("pad_mode", self.pad_mode)
         self.data_format = validator.check_string(data_format, ['NCDHW'], 'data_format', self.name)
-        self.kernel_size = _check_3d_int_or_tuple("kernel_size", kernel_size, self.name, allow_five=True, ret_five=True)
+        self.kernel_size = _check_3d_int_or_tuple("kernel_size", kernel_size, self.name,
+                                                  allow_five=False, ret_five=True)
         self.add_prim_attr("kernel_size", self.kernel_size)
-        self.strides = _check_3d_int_or_tuple("strides", strides, self.name, allow_five=True, ret_five=True)
+        self.strides = _check_3d_int_or_tuple("strides", strides, self.name, allow_five=False, ret_five=True)
         self.add_prim_attr("strides", self.strides)
 
     def infer_shape(self, x_shape):
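The user-visible effect of `allow_five=False`: `kernel_size` and `strides` now accept only an int or a 3-element (D, H, W) tuple, matching the corrected docstring above. A small sketch of the new behavior (shapes chosen for illustration):

```python
import numpy as np
from mindspore import Tensor, ops

# Accepted: an int or a 3-element (D, H, W) tuple.
pool = ops.MaxPool3D(kernel_size=(2, 2, 2), strides=1, pad_mode="valid")
x = Tensor(np.ones((1, 1, 4, 4, 4), dtype=np.float32))  # NCDHW input
print(pool(x).shape)  # (1, 1, 3, 3, 3)

# Rejected after this change: a 5-element tuple such as (1, 1, 2, 2, 2)
# now raises ValueError when the primitive is constructed.
```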
@@ -2274,7 +2275,7 @@ class BiasAdd(PrimitiveWithCheck):
         self.add_prim_attr('data_format', self.format)
 
     def check_shape(self, x_shape, b_shape):
-        validator.check_int(len(x_shape), 2, Rel.GE, "x rank", self.name)
+        validator.check_int_range(len(x_shape), 2, 5, Rel.INC_BOTH, "x rank", self.name)
         if self.format == "NCDHW" and (len(x_shape) != 5 or context.get_context("device_target") != "Ascend"):
             raise ValueError("NCDHW format only support 5-dims input in Ascend target.")
         validator.check_equal_int(len(b_shape), 1, "bias rank", self.name)
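What the widened check means for callers, sketched below: the input rank must now lie in [2, 5] instead of merely being >= 2, which is also why the 6-D and 7-D tests are deleted further down. Shapes here are illustrative:

```python
import numpy as np
from mindspore import Tensor, ops

bias_add = ops.BiasAdd()  # default data_format is "NCHW"

x5 = Tensor(np.ones((2, 5, 4, 4, 4), dtype=np.float32))  # rank 5: accepted
b = Tensor(np.ones((5,), dtype=np.float32))              # one bias per channel
print(bias_add(x5, b).shape)  # (2, 5, 4, 4, 4)

# Rank 6 and above is now rejected by check_shape:
# x6 = Tensor(np.ones((2, 5, 4, 4, 4, 1), dtype=np.float32))
# bias_add(x6, b)  # raises ValueError
```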
@@ -81,27 +81,3 @@ def test_bias_add5d():
     expect_output = np.ones([2, 5, 4, 4, 4]).astype(np.float32) * 2
     print(output)
     assert np.all(output.asnumpy() == expect_output), "bias_add execute failed, please check current code commit"
-
-@pytest.mark.level0
-@pytest.mark.platform_x86_cpu
-@pytest.mark.env_onecard
-def test_bias_add6d():
-    x = np.ones([2, 4, 4, 4, 4, 1]).astype(np.float32)
-    b = np.array([1, 1, 1, 1]).astype(np.float32)
-    bias_add = Net()
-    output = bias_add(Tensor(x), Tensor(b))
-    expect_output = np.ones([2, 4, 4, 4, 4, 1]).astype(np.float32) * 2
-    print(output)
-    assert np.all(output.asnumpy() == expect_output), "bias_add execute failed, please check current code commit"
-
-@pytest.mark.level0
-@pytest.mark.platform_x86_cpu
-@pytest.mark.env_onecard
-def test_bias_add7d():
-    x = np.ones([2, 4, 4, 4, 4, 1, 2]).astype(np.float32)
-    b = np.array([1, 1, 1, 1]).astype(np.float32)
-    bias_add = Net()
-    output = bias_add(Tensor(x), Tensor(b))
-    expect_output = np.ones([2, 4, 4, 4, 4, 1, 2]).astype(np.float32) * 2
-    print(output)
-    assert np.all(output.asnumpy() == expect_output), "bias_add execute failed, please check current code commit"
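If coverage of the rejected ranks is still wanted, a negative test along these lines (hypothetical, not part of this commit) would assert the new ValueError instead of expecting a result:

```python
import numpy as np
import pytest
from mindspore import Tensor, ops


def test_bias_add_rejects_6d():
    """Rank-6 input should now fail BiasAdd's [2, 5] rank check."""
    x = np.ones([2, 4, 4, 4, 4, 1]).astype(np.float32)
    b = np.array([1, 1, 1, 1]).astype(np.float32)
    with pytest.raises(ValueError):
        ops.BiasAdd()(Tensor(x), Tensor(b))
```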
@@ -65,14 +65,3 @@ def test_bias_add_grad5d():
     expect_output = np.array([64., 64., 64.]).astype(np.float32)
     print(output.asnumpy())
     assert np.all(output.asnumpy() == expect_output), "bias_add_grad execute failed, please check current code commit"
-
-@pytest.mark.level0
-@pytest.mark.platform_x86_cpu
-@pytest.mark.env_onecard
-def test_bias_add_grad7d():
-    dout = np.ones([2, 3, 4, 4, 2, 1, 10]).astype(np.float32)
-    bias_add_grad = Net()
-    output = bias_add_grad(Tensor(dout))
-    expect_output = np.array([640., 640., 640.]).astype(np.float32)
-    print(output.asnumpy())
-    assert np.all(output.asnumpy() == expect_output), "bias_add_grad execute failed, please check current code commit"
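For reference, the expected values in these grad tests are just `dout` reduced over every axis except the channel axis; a numpy sketch reproducing the removed 7-D expectation:

```python
import numpy as np

# BiasAdd's bias gradient sums dout over all axes except channel (axis 1).
dout = np.ones([2, 3, 4, 4, 2, 1, 10], dtype=np.float32)
axes = tuple(i for i in range(dout.ndim) if i != 1)
print(dout.sum(axis=axes))  # [640. 640. 640.], the removed test's expectation
```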