!35896 Fix AdaptiveMaxPool2D op not supporting the second output.

Merge pull request !35896 from linqingke/adative_maxpool_2d_support_ascend
This commit is contained in:
i-robot 2022-06-15 01:40:24 +00:00 committed by Gitee
commit 25e924d923
No known key found for this signature in database
GPG Key ID: 173E9B9CA92EEF8F
3 changed files with 29 additions and 5 deletions

View File

@ -7,6 +7,7 @@
"mindspore/mindspore/core/abstract/ops/prim_nn.cc" "zerodivcond"
"mindspore/mindspore/ccsrc/frontend/operator/ops_front_infer_function.cc" "zerodivcond"
"mindspore/mindspore/ccsrc/pipeline/jit/pipeline_split.cc" "zerodivcond"
"mindspore/mindspore/ccsrc/plugin/device/ascend/optimizer/ir_fusion/adaptive_max_pool2d_fusion.cc" "zerodivcond"
"mindspore/mindspore/ccsrc/plugin/device/ascend/hal/device/ascend_stream_assign.cc" "useStlAlgorithm"
"mindspore/mindspore/ccsrc/frontend/operator/ops_front_infer_function.cc" "uninitvar"
"mindspore/mindspore/ccsrc/pipeline/jit/action.cc" "constVariable"

View File

@ -17,11 +17,13 @@
#include <memory>
#include <vector>
#include "plugin/device/ascend/optimizer/ascend_helper.h"
#include "backend/common/optimizer/helper.h"
namespace mindspore {
namespace opt {
namespace {
constexpr int64_t kMaxPaddingSize = 10;
constexpr int64_t kAdaptiveMaxpool2DOutputNumber = 1;
std::vector<int64_t> ComputeKernelAttr(int64_t input_size, int64_t output_size) {
int64_t padding_size = 0;
@ -115,11 +117,24 @@ const AnfNodePtr AdaptiveMaxPool2DFusion::Process(const FuncGraphPtr &func_graph
int64_t height = SizeToLong(input_shape.at(kDim2));
int64_t width = SizeToLong(input_shape.at(kDim3));
if (output_size[kDim0] <= 0 || output_size[kDim1] <= 0) {
int64_t output_h = (output_size[kDim0] == -1) ? height : output_size[kDim0];
int64_t output_w = (output_size[kDim1] == -1) ? width : output_size[kDim1];
if (output_h <= 0 || output_w <= 0) {
MS_LOG(EXCEPTION) << "AdaptiveMaxPool2D's output_size value is invalid.";
}
auto height_attr = ComputeKernelAttr(height, output_size[kDim0]);
auto width_attr = ComputeKernelAttr(width, output_size[kDim1]);
if (height % output_h != 0 || width % output_w != 0) {
auto types = {common::AnfAlgo::GetOutputInferDataType(adaptive_max_pool2d, 0), kNumberTypeInt64};
auto shapes = {common::AnfAlgo::GetOutputDetailShape(adaptive_max_pool2d, 0),
common::AnfAlgo::GetOutputDetailShape(adaptive_max_pool2d, 0)};
common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, adaptive_max_pool2d.get());
std::vector<int64_t> new_output_size{output_h, output_w};
common::AnfAlgo::SetNodeAttr(kAttrOutputSize, MakeValue(new_output_size), adaptive_max_pool2d);
std::vector<AnfNodePtr> multi_outputs;
CreateMultipleOutputsOfAnfNode(func_graph, adaptive_max_pool2d, kAdaptiveMaxpool2DOutputNumber, &multi_outputs);
return multi_outputs[kIndex0];
}
auto height_attr = ComputeKernelAttr(height, output_h);
auto width_attr = ComputeKernelAttr(width, output_w);
if (height_attr[kIndex0] == -1 || width_attr[kIndex0] == -1) {
MS_LOG(EXCEPTION) << "Current AdaptiveMaxPool2D not support this scene! node:" << node->DebugString();
}
@ -128,8 +143,10 @@ const AnfNodePtr AdaptiveMaxPool2DFusion::Process(const FuncGraphPtr &func_graph
(void)pooling_inputs.insert(pooling_inputs.end(), adaptive_max_pool2d->inputs().begin() + 1,
adaptive_max_pool2d->inputs().end());
auto pooling = NewCNode(pooling_inputs, kernel_graph);
auto types = {common::AnfAlgo::GetOutputInferDataType(adaptive_max_pool2d, 0)};
auto shapes = {common::AnfAlgo::GetOutputDetailShape(adaptive_max_pool2d, 0)};
common::AnfAlgo::SetOutputTypeAndDetailShape(types, shapes, pooling.get());
pooling->set_scope(adaptive_max_pool2d->scope());
pooling->set_abstract(adaptive_max_pool2d->abstract());
SetNodeAttr(pooling, height_attr, width_attr);
return pooling;
}

View File

@ -256,6 +256,9 @@ class AdaptiveMaxPool2D(Primitive):
Output(i,j) &= {\max Input[h_{start}:h_{end}, w_{start}:w_{end}]}
\end{align}
Note:
On the Ascend platform, the second output `argmax` is invalid; please ignore it.
Args:
output_size (Union[int, tuple]): The target output size is H x W.
output_size can be a tuple, or a single H for H x H; H and W can each be an int or None,
@ -280,9 +283,10 @@ class AdaptiveMaxPool2D(Primitive):
TypeError: If dtype of `input_x` is not float16, float32 or float64.
ValueError: If `output_size` is a tuple and the length of `output_size` is not 2.
ValueError: If the dimension of `input_x` is not NCHW or CHW.
ValueError: If `output_size` is less than -1.
Supported Platforms:
``GPU``
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> # case 1: output_size=(None, 2)
@ -331,6 +335,8 @@ class AdaptiveMaxPool2D(Primitive):
self.output_size = (output_size, output_size) if isinstance(self.output_size, int) else output_size
self.output_size = (-1 if self.output_size[0] is None else self.output_size[0],
-1 if self.output_size[1] is None else self.output_size[1])
for size in self.output_size:
validator.check_number("output_size", size, -1, Rel.GE, None)
self.add_prim_attr('output_size', self.output_size)
self.add_prim_attr('return_indices', return_indices)