From c2167a53267ad58750f6bff8aefb95fbbec2effa Mon Sep 17 00:00:00 2001
From: linqingke
Date: Mon, 13 Jun 2022 15:01:04 +0800
Subject: [PATCH] fix adaptive_maxpool2d bug.

---
 .jenkins/check/config/filter_cppcheck.txt     |  1 +
 .../ir_fusion/adaptive_max_pool2d_fusion.cc   | 25 ++++++++++++++++---
 .../python/mindspore/ops/operations/nn_ops.py |  8 +++++-
 3 files changed, 29 insertions(+), 5 deletions(-)

diff --git a/.jenkins/check/config/filter_cppcheck.txt b/.jenkins/check/config/filter_cppcheck.txt
index 67bddf0feb7..0f6c30c23ad 100644
--- a/.jenkins/check/config/filter_cppcheck.txt
+++ b/.jenkins/check/config/filter_cppcheck.txt
@@ -7,6 +7,7 @@
 "mindspore/mindspore/core/abstract/ops/prim_nn.cc" "zerodivcond"
 "mindspore/mindspore/ccsrc/frontend/operator/ops_front_infer_function.cc" "zerodivcond"
 "mindspore/mindspore/ccsrc/pipeline/jit/pipeline_split.cc" "zerodivcond"
+"mindspore/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/adaptive_max_pool2d_fusion.cc" "zerodivcond"
 "mindspore/mindspore/ccsrc/plugin/device/ascend/hal/device/ascend_stream_assign.cc" "useStlAlgorithm"
 "mindspore/mindspore/ccsrc/frontend/operator/ops_front_infer_function.cc" "uninitvar"
 "mindspore/mindspore/ccsrc/pipeline/jit/action.cc" "constVariable"
diff --git a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/adaptive_max_pool2d_fusion.cc b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/adaptive_max_pool2d_fusion.cc
index 5d2f8c9e7dd..ed0fd981331 100644
--- a/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/adaptive_max_pool2d_fusion.cc
+++ b/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/adaptive_max_pool2d_fusion.cc
@@ -17,11 +17,13 @@
 #include <memory>
 #include <vector>
 #include "plugin/device/ascend/optimizer/ascend_helper.h"
+#include "backend/common/optimizer/helper.h"
 
 namespace mindspore {
 namespace opt {
 namespace {
 constexpr int64_t kMaxPaddingSize = 10;
+constexpr int64_t kAdaptiveMaxpool2DOutputNumber = 1;
 
 std::vector<int64_t> ComputeKernelAttr(int64_t input_size, int64_t output_size) {
   int64_t padding_size = 0;
@@ -115,11 +117,24 @@ const AnfNodePtr AdaptiveMaxPool2DFusion::Process(const FuncGraphPtr &func_graph
   int64_t height = SizeToLong(input_shape.at(kDim2));
   int64_t width = SizeToLong(input_shape.at(kDim3));
 
-  if (output_size[kDim0] <= 0 || output_size[kDim1] <= 0) {
+  int64_t output_h = (output_size[kDim0] == -1) ? height : output_size[kDim0];
+  int64_t output_w = (output_size[kDim1] == -1) ? width : output_size[kDim1];
+  if (output_h <= 0 || output_w <= 0) {
     MS_LOG(EXCEPTION) << "AdaptiveMaxPool2D's output_size value is invalid.";
   }
-  auto height_attr = ComputeKernelAttr(height, output_size[kDim0]);
-  auto width_attr = ComputeKernelAttr(width, output_size[kDim1]);
+  if (height % output_h != 0 || width % output_w != 0) {
+    auto types = {common::AnfAlgo::GetOutputInferDataType(adaptive_max_pool2d, 0), kNumberTypeInt64};
+    auto shapes = {common::AnfAlgo::GetOutputDetailShape(adaptive_max_pool2d, 0),
+                   common::AnfAlgo::GetOutputDetailShape(adaptive_max_pool2d, 0)};
+    common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, adaptive_max_pool2d.get());
+    std::vector<int64_t> new_output_size{output_h, output_w};
+    common::AnfAlgo::SetNodeAttr(kAttrOutputSize, MakeValue(new_output_size), adaptive_max_pool2d);
+    std::vector<AnfNodePtr> multi_outputs;
+    CreateMultipleOutputsOfAnfNode(func_graph, adaptive_max_pool2d, kAdaptiveMaxpool2DOutputNumber, &multi_outputs);
+    return multi_outputs[kIndex0];
+  }
+  auto height_attr = ComputeKernelAttr(height, output_h);
+  auto width_attr = ComputeKernelAttr(width, output_w);
   if (height_attr[kIndex0] == -1 || width_attr[kIndex0] == -1) {
     MS_LOG(EXCEPTION) << "Current AdaptiveMaxPool2D not support this scene! node:" << node->DebugString();
   }
@@ -128,8 +143,10 @@ const AnfNodePtr AdaptiveMaxPool2DFusion::Process(const FuncGraphPtr &func_graph
   (void)pooling_inputs.insert(pooling_inputs.end(), adaptive_max_pool2d->inputs().begin() + 1,
                               adaptive_max_pool2d->inputs().end());
   auto pooling = NewCNode(pooling_inputs, kernel_graph);
+  auto types = {common::AnfAlgo::GetOutputInferDataType(adaptive_max_pool2d, 0)};
+  auto shapes = {common::AnfAlgo::GetOutputDetailShape(adaptive_max_pool2d, 0)};
+  common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, pooling.get());
   pooling->set_scope(adaptive_max_pool2d->scope());
-  pooling->set_abstract(adaptive_max_pool2d->abstract());
   SetNodeAttr(pooling, height_attr, width_attr);
   return pooling;
 }
diff --git a/mindspore/python/mindspore/ops/operations/nn_ops.py b/mindspore/python/mindspore/ops/operations/nn_ops.py
index 75f1c59cf1a..e441f5ef8f7 100644
--- a/mindspore/python/mindspore/ops/operations/nn_ops.py
+++ b/mindspore/python/mindspore/ops/operations/nn_ops.py
@@ -256,6 +256,9 @@ class AdaptiveMaxPool2D(Primitive):
         Output(i,j) &= {\max Input[h_{start}:h_{end}, w_{start}:w_{end}]}
         \end{align}
 
+    Note:
+        On Ascend, the second output `argmax` is invalid; please ignore it.
+
     Args:
         output_size (Union[int, tuple]): The target output size is H x W.
             output_size can be a tuple, or a single H for H x H, and H and W can be int or None
@@ -280,9 +283,10 @@ class AdaptiveMaxPool2D(Primitive):
         TypeError: If dtype of `input_x` is not float16, float32 or float64.
         ValueError: If `output_size` is a tuple and the length of `output_size` is not 2.
         ValueError: If the dimension of `input_x` is not NCHW or CHW.
+        ValueError: If `output_size` is less than -1.
 
     Supported Platforms:
-        ``GPU``
+        ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
         >>> # case 1: output_size=(None, 2)
@@ -331,6 +335,8 @@ class AdaptiveMaxPool2D(Primitive):
         self.output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
         self.output_size = (-1 if self.output_size[0] is None else self.output_size[0],
                             -1 if self.output_size[1] is None else self.output_size[1])
+        for size in self.output_size:
+            validator.check_number("output_size", size, -1, Rel.GE, None)
         self.add_prim_attr('output_size', self.output_size)
         self.add_prim_attr('return_indices', return_indices)
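
Background on the divisibility branch added above: when the input height and width divide
evenly by the requested output sizes, adaptive max pooling is equivalent to an ordinary max
pool with kernel size and stride both equal to input_size / output_size, which is what lets
the pass replace the node with a fixed Pooling op; otherwise the node is kept as
AdaptiveMaxPool2D with normalized attributes. A minimal NumPy sketch of that equivalence,
assuming PyTorch-style bin boundaries (illustrative only; this is not the pass's
ComputeKernelAttr implementation, and both helper names below are made up):

    import numpy as np

    def adaptive_max_pool_1d(x, output_size):
        # Reference adaptive max pooling along one axis: bin i covers
        # [floor(i*n/out), ceil((i+1)*n/out)).
        n = x.shape[0]
        bins = [(i * n // output_size, -(-(i + 1) * n // output_size))
                for i in range(output_size)]
        return np.array([x[s:e].max() for s, e in bins])

    def fixed_max_pool_1d(x, kernel, stride):
        # Plain max pooling with a fixed window and stride, no padding.
        n = x.shape[0]
        return np.array([x[i:i + kernel].max()
                         for i in range(0, n - kernel + 1, stride)])

    x = np.random.rand(32).astype(np.float32)
    out_size = 8              # 32 % 8 == 0: the evenly divisible case
    k = s = 32 // out_size    # kernel == stride == input_size / output_size
    assert np.allclose(adaptive_max_pool_1d(x, out_size),
                       fixed_max_pool_1d(x, k, s))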
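
On the Python side, a usage sketch of the patched operator follows. It assumes the primitive
is reachable as mindspore.ops.AdaptiveMaxPool2D and returns the (output, argmax) pair implied
by the new Note; shapes and values are arbitrary:

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    # 1 x 32 x 32 CHW input with arbitrary values.
    input_x = Tensor(np.random.rand(1, 32, 32), mindspore.float32)

    # None is normalized to -1 internally, which keeps that dimension equal
    # to the input size, so the pooled output here should be 1 x 32 x 8.
    adaptive_max_pool_2d = ops.AdaptiveMaxPool2D((None, 8))
    output_y, argmax = adaptive_max_pool_2d(input_x)
    print(output_y.shape)  # expected: (1, 32, 8); on Ascend, ignore argmax

    # Per the new validator check, output_size values below -1 are rejected
    # when the primitive is constructed.
    try:
        ops.AdaptiveMaxPool2D((-2, 8))
    except ValueError as e:
        print(e)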