fix fractional_avg_pool_grad ops dynamic infer
parent 89bfdefb4a
commit a4d45e8bb5
@@ -31,6 +31,7 @@ namespace ops {
namespace {
constexpr size_t kInpuSizes = 4;
constexpr size_t kInpuDims = 1;
constexpr int64_t kDynamicRankValue = -2;
abstract::ShapePtr FractionalAvgPoolGradInferShape(const PrimitivePtr &primitive,
                                                   const std::vector<AbstractBasePtr> &input_args) {
  MS_EXCEPTION_IF_NULL(primitive);

@@ -55,17 +56,25 @@ abstract::ShapePtr FractionalAvgPoolGradInferShape(const PrimitivePtr &primitive
  auto shape_ptr = std::make_shared<abstract::Shape>(
    CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kInputIndex0]->BuildShape())[kShape]);
  auto shape_v = shape_ptr->shape();
  if (shape_v[kInputIndex0] != SizeToLong(kInpuSizes)) {
    MS_EXCEPTION(ValueError) << "For '" << op_name << "', the input 'orig_input_tensor_shape' tensor shape must = 4.";
  }
  if (shape_v.size() > kInpuDims) {
    MS_EXCEPTION(ValueError) << "For '" << op_name
                             << "', the input 'orig_input_tensor_shape' tensor must be a 1-D tensor.";
  }

  std::vector<int64_t> output_shape;
  if (IsDynamicRank(shape_v)) {
    output_shape.push_back(kDynamicRankValue);
    return std::make_shared<abstract::Shape>(output_shape);
  }

  if (IsDynamic(shape_v)) {
    output_shape = {abstract::Shape::kShapeDimAny, abstract::Shape::kShapeDimAny, abstract::Shape::kShapeDimAny,
                    abstract::Shape::kShapeDimAny};
    return std::make_shared<abstract::Shape>(output_shape);
  }

  if (!input_args[kInputIndex0]->BuildValue()->isa<AnyValue>() &&
      !input_args[kInputIndex0]->BuildValue()->isa<None>()) {
    std::vector<int64_t> output_shape;
    int64_t shape_m = 1;
    auto input_shape_ptr = reinterpret_cast<int64_t *>(input_shape_tensor->data_c());
    for (auto i = 0; i < shape_v[kInputIndex0]; ++i) {

@@ -83,7 +92,6 @@ abstract::ShapePtr FractionalAvgPoolGradInferShape(const PrimitivePtr &primitive
    return std::make_shared<abstract::Shape>(output_shape);
  } else {
    const uint32_t input_shapes = static_cast<uint32_t>(std::pow(max_length, 1.0 / shape_v[kInputIndex0]));
    std::vector<int64_t> output_shape;
    ShapeVector shape_min;
    ShapeVector shape_max;
    for (int i = 0; i < shape_v[kInputIndex0]; i++) {
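For context on the new branches above: MindSpore shape inference encodes an unknown rank as a single -2 entry (kDynamicRankValue) and a known rank with unknown dimensions as -1 per dimension (abstract::Shape::kShapeDimAny). A minimal Python sketch of the same decision flow — the function and constant names here are illustrative, not part of the commit, and the sketch simplifies the final branch, which in the C++ additionally derives min/max bounds from max_length:

# Illustrative sketch (not from the commit) of the dynamic-shape decision
# flow implemented by FractionalAvgPoolGradInferShape above.
DYNAMIC_RANK = -2  # mirrors kDynamicRankValue: the rank itself is unknown
DIM_ANY = -1       # mirrors abstract::Shape::kShapeDimAny: one unknown dimension

def infer_output_shape(shape_tensor_dims, shape_values=None):
    """shape_tensor_dims: shape of the 1-D 'orig_input_tensor_shape' input.
    shape_values: its concrete contents, if known at compile time."""
    if DYNAMIC_RANK in shape_tensor_dims:
        return [DYNAMIC_RANK]        # propagate dynamic rank unchanged
    if DIM_ANY in shape_tensor_dims:
        return [DIM_ANY] * 4         # rank is known to be 4 (NHWC), dims are not
    if shape_values is not None:
        return list(shape_values)    # value known: the output shape is that value
    return [DIM_ANY] * 4             # value unknown until runtime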
@@ -230,3 +230,155 @@ def test_fractionalavgpool_pynative_dynamic():
    assert np.allclose(output_y, expect_output_y)
    assert np.allclose(output_row_pooling_sequence, expect_output_row_pooling_sequence)
    assert np.allclose(output_col_pooling_sequence, expect_output_col_pooling_sequence)


@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.env_onecard
def test_fractionalavgpoolgrad_graph_dynamic():
    """
    Feature: FractionalAvgPoolGrad
    Description: Test FractionalAvgPoolGrad with dynamic-shape inputs in graph mode.
    Expectation: The results are as expected.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
    types = [np.float32, np.float64, np.int32, np.int64]
    for type_i in types:
        x = Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8, 9,
                             10, 11, 12, 13, 14, 15, 16]).reshape([1, 4, 4, 1]).astype(type_i))
        net = NetFractionalAvgPool()
        output = net(x)
        output_y = output[0].asnumpy()
        output_row_pooling_sequence = output[1].asnumpy()
        output_col_pooling_sequence = output[2].asnumpy()
        expect_output_y = np.array([[[[3.5], [5.5]], [[11.5], [13.5]]]]).astype(type_i)
        expect_output_row_pooling_sequence = np.array([0, 2, 4]).astype(np.int64)
        expect_output_col_pooling_sequence = np.array([0, 2, 4]).astype(np.int64)
        assert np.allclose(output_y, expect_output_y)
        assert np.allclose(output_row_pooling_sequence, expect_output_row_pooling_sequence)
        assert np.allclose(output_col_pooling_sequence, expect_output_col_pooling_sequence)

        net = NetFractionalAvgPoolRealRandom()
        output = net(x)
        type0 = output[0].asnumpy().dtype
        assert type0 == type_i

        net = NetFractionalAvgPoolOverlapPing()
        output = net(x)
        output_y = output[0].asnumpy()
        output_row_pooling_sequence = output[1].asnumpy()
        output_col_pooling_sequence = output[2].asnumpy()
        expect_output_y = np.array([[[[6], [7.5]], [[12], [13.5]]]]).astype(type_i)
        expect_output_row_pooling_sequence = np.array([0, 2, 4]).astype(np.int64)
        expect_output_col_pooling_sequence = np.array([0, 2, 4]).astype(np.int64)
        assert np.allclose(output_y, expect_output_y)
        assert np.allclose(output_row_pooling_sequence, expect_output_row_pooling_sequence)
        assert np.allclose(output_col_pooling_sequence, expect_output_col_pooling_sequence)

        netgrad = NetFractionalAvgPoolGrad()
        x_shape = Tensor(np.array([1, 4, 4, 1]).astype(np.int64))
        out_backprop = Tensor(np.ones([1, 2, 2, 1]).astype(type_i))

        set_input_dyn = []
        dy_shape_0 = [None for _ in x_shape.shape]
        input_dyn_0 = Tensor(shape=dy_shape_0, dtype=x_shape.dtype)
        set_input_dyn.append(input_dyn_0)

        dy_shape_1 = [None for _ in out_backprop.shape]
        input_dyn_1 = Tensor(shape=dy_shape_1, dtype=out_backprop.dtype)
        set_input_dyn.append(input_dyn_1)

        dy_shape_2 = [None for _ in output[1].shape]
        input_dyn_2 = Tensor(shape=dy_shape_2, dtype=output[1].dtype)
        set_input_dyn.append(input_dyn_2)

        dy_shape_3 = [None for _ in output[2].shape]
        input_dyn_3 = Tensor(shape=dy_shape_3, dtype=output[2].dtype)
        set_input_dyn.append(input_dyn_3)

        netgrad.set_inputs(*set_input_dyn)

        output_grad = netgrad(x_shape, out_backprop, output[1], output[2])
        output_grad_y = output_grad[0].asnumpy()
        expect_output_grad_y = np.array([[[[0.25], [0.25], [0.25], [0.25]],
                                          [[0.25], [0.25], [0.25], [0.25]],
                                          [[0.25], [0.25], [0.25], [0.25]],
                                          [[0.25], [0.25], [0.25], [0.25]]]]).astype(type_i)
        assert np.allclose(output_grad_y, expect_output_grad_y)
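The set_inputs block above is what actually exercises the new dynamic infer path: compiling against placeholder Tensors whose dimensions are all None forces FractionalAvgPoolGrad's shape inference through the IsDynamic branch before the concrete tensors arrive. The same idiom, condensed into a reusable helper — the helper name is illustrative and not part of the test file; Tensor is the same import the tests already use:

# Illustrative helper (not part of the test file) condensing the
# set_inputs idiom used above.
def run_with_dynamic_shapes(net, *concrete_inputs):
    # One placeholder per input: same dtype, every dimension unknown (None).
    placeholders = [Tensor(shape=[None] * len(t.shape), dtype=t.dtype)
                    for t in concrete_inputs]
    net.set_inputs(*placeholders)   # compile the graph against unknown shapes
    return net(*concrete_inputs)    # then execute with the real tensors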


@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.env_onecard
def test_fractionalavgpoolgrad_pynative_dynamic():
    """
    Feature: FractionalAvgPoolGrad
    Description: Test FractionalAvgPoolGrad with dynamic-shape inputs in PyNative mode.
    Expectation: The results are as expected.
    """
    context.set_context(mode=context.PYNATIVE_MODE, device_target='CPU')
    types = [np.float32, np.float64, np.int32, np.int64]
    for type_i in types:
        x = Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8, 9,
                             10, 11, 12, 13, 14, 15, 16]).reshape([1, 4, 4, 1]).astype(type_i))
        net = NetFractionalAvgPool()
        output = net(x)
        output_y = output[0].asnumpy()
        output_row_pooling_sequence = output[1].asnumpy()
        output_col_pooling_sequence = output[2].asnumpy()
        expect_output_y = np.array([[[[3.5], [5.5]], [[11.5], [13.5]]]]).astype(type_i)
        expect_output_row_pooling_sequence = np.array([0, 2, 4]).astype(np.int64)
        expect_output_col_pooling_sequence = np.array([0, 2, 4]).astype(np.int64)
        assert np.allclose(output_y, expect_output_y)
        assert np.allclose(output_row_pooling_sequence, expect_output_row_pooling_sequence)
        assert np.allclose(output_col_pooling_sequence, expect_output_col_pooling_sequence)

        net = NetFractionalAvgPoolRealRandom()
        output = net(x)
        type0 = output[0].asnumpy().dtype
        assert type0 == type_i

        net = NetFractionalAvgPoolOverlapPing()
        output = net(x)
        output_y = output[0].asnumpy()
        output_row_pooling_sequence = output[1].asnumpy()
        output_col_pooling_sequence = output[2].asnumpy()
        expect_output_y = np.array([[[[6], [7.5]], [[12], [13.5]]]]).astype(type_i)
        expect_output_row_pooling_sequence = np.array([0, 2, 4]).astype(np.int64)
        expect_output_col_pooling_sequence = np.array([0, 2, 4]).astype(np.int64)
        assert np.allclose(output_y, expect_output_y)
        assert np.allclose(output_row_pooling_sequence, expect_output_row_pooling_sequence)
        assert np.allclose(output_col_pooling_sequence, expect_output_col_pooling_sequence)

        netgrad = NetFractionalAvgPoolGrad()
        x_shape = Tensor(np.array([1, 4, 4, 1]).astype(np.int64))
        out_backprop = Tensor(np.ones([1, 2, 2, 1]).astype(type_i))

        set_input_dyn = []
        dy_shape_0 = [None for _ in x_shape.shape]
        input_dyn_0 = Tensor(shape=dy_shape_0, dtype=x_shape.dtype)
        set_input_dyn.append(input_dyn_0)

        dy_shape_1 = [None for _ in out_backprop.shape]
        input_dyn_1 = Tensor(shape=dy_shape_1, dtype=out_backprop.dtype)
        set_input_dyn.append(input_dyn_1)

        dy_shape_2 = [None for _ in output[1].shape]
        input_dyn_2 = Tensor(shape=dy_shape_2, dtype=output[1].dtype)
        set_input_dyn.append(input_dyn_2)

        dy_shape_3 = [None for _ in output[2].shape]
        input_dyn_3 = Tensor(shape=dy_shape_3, dtype=output[2].dtype)
        set_input_dyn.append(input_dyn_3)

        netgrad.set_inputs(*set_input_dyn)

        output_grad = netgrad(x_shape, out_backprop, output[1], output[2])
        output_grad_y = output_grad[0].asnumpy()
        expect_output_grad_y = np.array([[[[0.25], [0.25], [0.25], [0.25]],
                                          [[0.25], [0.25], [0.25], [0.25]],
                                          [[0.25], [0.25], [0.25], [0.25]],
                                          [[0.25], [0.25], [0.25], [0.25]]]]).astype(type_i)
        assert np.allclose(output_grad_y, expect_output_grad_y)
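As a sanity check on the expected gradient used in both tests: the 2x2 average-pooling windows tile the 4x4 input without overlap, so an incoming gradient of 1 per pooled output is split evenly into 1/4 per contributing input element. A standalone NumPy verification, independent of MindSpore:

import numpy as np

# Each of the four 2x2 windows covers the 4x4 input exactly once, so a
# unit gradient per pooled output spreads as 1/4 per input cell.
grad = np.zeros((1, 4, 4, 1))
for r in (0, 2):          # window row starts (rows 0-1 and 2-3)
    for c in (0, 2):      # window column starts (cols 0-1 and 2-3)
        grad[0, r:r + 2, c:c + 2, 0] += 1.0 / 4
assert np.allclose(grad, 0.25)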