!37717 add adaptive_maxpool3d dynamic rank support

Merge pull request !37717 from 范吉斌/dynamic_rank_adaptive_maxpool3d
i-robot 2022-07-11 06:16:42 +00:00 committed by Gitee
commit 9eab318ac7
2 changed files with 43 additions and 1 deletion


@@ -26,12 +26,20 @@ namespace {
 constexpr int64_t kInputDims4 = 4;
 constexpr int64_t kInputDims5 = 5;
 constexpr int64_t kOutputSizeNumElem = 3;
+constexpr int64_t kDynamicRankValue = -2;
+constexpr size_t kDynamicRankLen = 1;
 abstract::TupleShapePtr AdaptiveMaxPool3DInferShape(const PrimitivePtr &primitive,
                                                     const std::vector<AbstractBasePtr> &input_args) {
   auto x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[kShape];
   auto output_size_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[1]->BuildShape())[kShape];
   const int64_t input_num_dims = SizeToLong(x_shape.size());
+  std::shared_ptr<mindspore::abstract::Shape> out_shape_ptr;
+  if (x_shape.size() == kDynamicRankLen && x_shape[0] == kDynamicRankValue) {
+    ShapeVector out_shape = {kDynamicRankValue};
+    out_shape_ptr = std::make_shared<abstract::Shape>(out_shape);
+    return std::make_shared<abstract::TupleShape>(std::vector<abstract::BaseShapePtr>{out_shape_ptr, out_shape_ptr});
+  }
   const int64_t output_size_dim = SizeToLong(output_size_shape.size());
   CheckAndConvertUtils::CheckInRange("x_dim", input_num_dims, kIncludeBoth, {kInputDims4, kInputDims5},
                                      kNameAdaptiveMaxPool3D);
@@ -40,7 +48,6 @@ abstract::TupleShapePtr AdaptiveMaxPool3DInferShape(const PrimitivePtr &primitiv
                                      kNameAdaptiveMaxPool3D);
   auto output_size = input_args[1];
-  std::shared_ptr<mindspore::abstract::Shape> out_shape_ptr;
   auto output_size_value = output_size->BuildValue();
   MS_EXCEPTION_IF_NULL(output_size_value);
   if (output_size->isa<abstract::AbstractTensor>() && !output_size_value->isa<None>() &&
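
For context on the early return added above: MindSpore's shape inference encodes "rank unknown at compile time" as a shape vector of length 1 whose single entry is -2, which is exactly what kDynamicRankLen and kDynamicRankValue spell out; both outputs of AdaptiveMaxPool3D (pooled values and argmax indices) are then propagated as dynamic-rank shapes. A minimal Python sketch of the same convention (the helper name is hypothetical, for illustration only):

# Hypothetical mirror of the C++ check above: a shape of [-2]
# (length 1, single entry -2) denotes a dynamic-rank tensor.
DYNAMIC_RANK_VALUE = -2
DYNAMIC_RANK_LEN = 1

def is_dynamic_rank(shape):
    return len(shape) == DYNAMIC_RANK_LEN and shape[0] == DYNAMIC_RANK_VALUE

assert is_dynamic_rank([-2])                 # rank unknown until runtime
assert not is_dynamic_rank([8, 3, 2, 2, 2])  # ordinary static 5-D shape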


@@ -23,6 +23,7 @@ from mindspore import Tensor
 from mindspore.ops import operations as P
 from mindspore.ops.operations.nn_ops import AdaptiveMaxPool3D
 from mindspore.ops.functional import vmap
+import mindspore.numpy as ms_np

 context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
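
The only change in this hunk is the new mindspore.numpy import: the test below computes its reduction axes inside construct, so the random axis generation must be a graph-mode op rather than host-side NumPy. For example (the values are known only at runtime):

axis = ms_np.unique(ms_np.randint(1, 3, (2,)))  # Tensor holding 1 or 2 unique axes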
@@ -51,6 +52,40 @@ class DynamicShapeNet(nn.Cell):
         return self.net(x, output_size)
+
+
+class RankDynamicNet(nn.Cell):
+    def __init__(self):
+        super().__init__()
+        self.adaptive_max_pool3d = AdaptiveMaxPool3D()
+        self.reduce = P.ReduceSum(keep_dims=False)
+
+    def construct(self, x, output_size):
+        rand_axis = ms_np.randint(1, 3, (2,))
+        axis = ms_np.unique(rand_axis)
+        in_x = self.reduce(x, axis)
+        out = self.adaptive_max_pool3d(in_x, output_size)
+        return out, in_x
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_rank_dynamic():
+    """
+    Feature: test dynamic rank support.
+    Description: test the AdaptiveMaxPool3D op with a dynamic-rank input.
+    Expectation: expect correct result.
+    """
+    input_x = Tensor(np.random.randn(8, 3, 5, 8, 5, 6).astype(np.float32))
+    output_size = Tensor(np.array([2, 2, 2]).astype(np.int32))
+    dyn_net = RankDynamicNet()
+    dyn_output, in_x = dyn_net(input_x, output_size)
+    net = Net()
+    output = net(in_x, output_size)
+    assert (dyn_output[0].asnumpy() == output[0].asnumpy()).all()
+    assert (dyn_output[1].asnumpy() == output[1].asnumpy()).all()
+

 @pytest.mark.level0
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
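
Why this test actually exercises the new infer-shape branch: ms_np.unique over a 2-element random vector yields a tensor holding 1 or 2 axes, known only at runtime, so ReduceSum cannot tell at compile time whether it turns the 6-D input into a 5-D or a 4-D tensor; the rank of in_x is therefore dynamic when AdaptiveMaxPool3D's shape is inferred, and the result is verified against a static-shape run of the same op (the Net cell defined earlier in this file). For comparison, a minimal static-rank usage sketch of the op outside the test harness (assumed standalone setup, not part of this PR):

import numpy as np
from mindspore import Tensor
from mindspore.ops.operations.nn_ops import AdaptiveMaxPool3D

# Static 5-D (NCDHW) input; the op returns pooled values and argmax indices.
pool = AdaptiveMaxPool3D()
x = Tensor(np.random.randn(8, 3, 5, 8, 5).astype(np.float32))
output_size = Tensor(np.array([2, 2, 2]).astype(np.int32))
y, argmax = pool(x, output_size)
# Both outputs have shape (8, 3, 2, 2, 2).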