!28314 add ascend Conv2dBackpropInputBiasaddFusion

Merge pull request !28314 from lyqlola/master
i-robot 2021-12-31 09:57:13 +00:00 committed by Gitee
commit ad4836c538
5 changed files with 160 additions and 1 deletion
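On Ascend, this change adds an IR-fusion pass that rewrites a Conv2DBackpropInput node followed by a BiasAdd into a single Conv2DTranspose node, and registers the Conv2DTranspose TBE kernel (conv2d_transpose_d) that the fused node compiles to.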

mindspore/ccsrc/backend/optimizer/ascend/ascend_backend_optimization.cc

@@ -68,6 +68,7 @@
#include "backend/optimizer/ascend/ir_fusion/mul_add_fusion.h"
#include "backend/optimizer/ascend/ir_fusion/mul_addn_fusion.h"
#include "backend/optimizer/ascend/ir_fusion/matmul_biasadd_fusion.h"
#include "backend/optimizer/ascend/ir_fusion/conv2d_backprop_input_biasadd_fusion.h"
#include "backend/optimizer/ascend/ir_fusion/remove_reshape_pair.h"
#include "backend/optimizer/ascend/ir_fusion/derelu_fusion.h"
#include "backend/optimizer/ascend/ir_fusion/batchnorm_to_bninfer.h"
@@ -218,6 +219,7 @@ void AddAscendIRFusionPass(PassManager *ir_fusion_pm) {
  ir_fusion_pm->AddPass(std::make_shared<MulAddFusion>());
  ir_fusion_pm->AddPass(std::make_shared<MulAddNFusion>());
  ir_fusion_pm->AddPass(std::make_shared<MatmulBiasaddFusion>());
  ir_fusion_pm->AddPass(std::make_shared<Conv2dBackpropInputBiasaddFusion>());
  ir_fusion_pm->AddPass(std::make_shared<MatmulAddFusion>());
  ir_fusion_pm->AddPass(std::make_shared<AddnFission>());
  ir_fusion_pm->AddPass(std::make_shared<DereluFusion>());

mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/conv2d_backprop_input_biasadd_fusion.cc

@@ -0,0 +1,62 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/optimizer/ascend/ir_fusion/conv2d_backprop_input_biasadd_fusion.h"
#include <memory>
#include <vector>
#include "backend/session/anf_runtime_algorithm.h"
#include "backend/optimizer/common/helper.h"
#include "utils/utils.h"
#include "utils/trace_base.h"
#include "runtime/device/ascend/lic_manager.h"
namespace mindspore {
namespace opt {
const BaseRef Conv2dBackpropInputBiasaddFusion::DefinePattern() const {
  // Match the subgraph BiasAdd(Conv2DBackpropInput(x0, x1), x2).
  VectorRef conv2d_bp_input({conv2d_bp_input_var_, x0_, x1_});
  VectorRef pattern({prim::kPrimBiasAdd, conv2d_bp_input, x2_});
  return pattern;
}

const AnfNodePtr Conv2dBackpropInputBiasaddFusion::Process(const FuncGraphPtr &graph, const AnfNodePtr &node,
                                                           const EquivPtr &equiv) const {
  MS_EXCEPTION_IF_NULL(graph);
  MS_EXCEPTION_IF_NULL(node);
  auto conv2d_bp_input = GetAnfNodeByVar(equiv, conv2d_bp_input_var_);
  if (conv2d_bp_input == nullptr || !conv2d_bp_input->isa<CNode>()) {
    MS_LOG(EXCEPTION) << "Get CNode Conv2DBackpropInput failed!" << trace::DumpSourceLines(conv2d_bp_input);
  }
  // If there is a side-effect operator between the two nodes, do not fuse.
  if (!IsStateEquivalent(node, conv2d_bp_input)) {
    return node;
  }
  // Build the replacement node Conv2DTranspose(x, filter, bias).
  std::vector<AnfNodePtr> inputs;
  inputs.emplace_back(NewValueNode(std::make_shared<Primitive>(prim::kPrimConv2DTranspose->name())));
  inputs.emplace_back(GetAnfNodeByVar(equiv, x0_));
  inputs.emplace_back(GetAnfNodeByVar(equiv, x1_));
  inputs.emplace_back(GetAnfNodeByVar(equiv, x2_));
  auto new_node = NewCNode(inputs, graph);
  MS_EXCEPTION_IF_NULL(new_node);
  // The fused node keeps the BiasAdd's scope and abstract, plus the convolution's attributes.
  new_node->set_scope(node->scope());
  new_node->set_abstract(node->abstract());
  AnfAlgo::CopyNodeAttrs(conv2d_bp_input, new_node);
  return new_node;
}
} // namespace opt
} // namespace mindspore
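For context, here is a minimal sketch (not part of this commit) of a network that produces the matched pattern; the class name, operator arguments, and shapes are illustrative assumptions. Run in graph mode on Ascend, the ir_fusion stage should rewrite the BiasAdd(Conv2DBackpropInput(...)) pair below into one Conv2DTranspose node.

# Hypothetical repro, not part of the commit: builds the exact subgraph that
# DefinePattern() above matches. All shapes and arguments are assumptions.
import numpy as np
import mindspore.nn as nn
import mindspore.ops.operations as P
from mindspore import Tensor, context

class NetConv2dBpInputBias(nn.Cell):
    def __init__(self):
        super(NetConv2dBpInputBias, self).__init__()
        # out_channel/kernel_size describe the forward conv whose input gradient is computed
        self.conv_bp_input = P.Conv2DBackpropInput(out_channel=4, kernel_size=1)
        self.bias_add = P.BiasAdd()
        self.input_sizes = (1, 4, 8, 8)  # NCHW shape of the original conv input

    def construct(self, dout, weight, bias):
        x = self.conv_bp_input(dout, weight, self.input_sizes)
        return self.bias_add(x, bias)

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
dout = Tensor(np.ones((1, 4, 8, 8), np.float16))
weight = Tensor(np.ones((4, 4, 1, 1), np.float16))
bias = Tensor(np.ones((4,), np.float16))
output = NetConv2dBpInputBias()(dout, weight, bias)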

mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/conv2d_backprop_input_biasadd_fusion.h

@@ -0,0 +1,46 @@
/**
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_IR_FUSION_CONV2D_BACKPROP_INPUT_BIASADD_FUSION_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_IR_FUSION_CONV2D_BACKPROP_INPUT_BIASADD_FUSION_H_
#include <memory>
#include "backend/optimizer/common/optimizer.h"
namespace mindspore {
namespace opt {
class Conv2dBackpropInputBiasaddFusion : public PatternProcessPass {
 public:
  explicit Conv2dBackpropInputBiasaddFusion(bool multigraph = true)
      : PatternProcessPass("conv2d_backprop_input_biasadd_fusion", multigraph) {
    // x0_ and x1_ bind the inputs of Conv2DBackpropInput; x2_ binds the bias input of BiasAdd.
    x0_ = std::make_shared<Var>();
    x1_ = std::make_shared<Var>();
    x2_ = std::make_shared<Var>();
    // This Var is constrained to the Conv2DBackpropInput primitive, so the pattern
    // only matches that operator.
    conv2d_bp_input_var_ = std::make_shared<Var>(std::make_shared<Primitive>(prim::kPrimConv2DBackpropInput->name()));
  }
  ~Conv2dBackpropInputBiasaddFusion() override = default;
  const BaseRef DefinePattern() const override;
  const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override;

 private:
  VarPtr x0_;
  VarPtr x1_;
  VarPtr x2_;
  VarPtr conv2d_bp_input_var_;
};
} // namespace opt
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_IR_FUSION_CONV2D_BACKPROP_INPUT_BIASADD_FUSION_H_
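As with the other ir_fusion passes registered above, the pass derives from PatternProcessPass: DefinePattern() describes the subgraph to find, Process() builds its replacement, and the equiv map produced by the matcher lets Process() look up the node bound to each Var.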

mindspore/ops/_op_impl/tbe/__init__.py

@@ -89,6 +89,7 @@ from .conv2d_backprop_input import _conv2d_backprop_input_tbe
from .conv2d_ds import _conv2d_ds_tbe
from .conv2d_backprop_filter_ds import _conv2d_backprop_filter_ds_tbe
from .conv2d_backprop_input_ds import _conv2d_backprop_input_ds_tbe
from .conv2d_transpose import _conv2d_transpose_tbe
from .confusion_mul_grad import _confusion_mul_grad_tbe
from .dropout_do_mask import _dropout_do_mask_tbe
from .dropout_do_mask_ds import _dropout_do_mask_ds_tbe

mindspore/ops/_op_impl/tbe/conv2d_transpose.py

@@ -0,0 +1,48 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Conv2DTranspose op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
conv2d_transpose_op_info = TBERegOp("Conv2DTranspose") \
    .fusion_type("CONVOLUTION") \
    .async_flag(False) \
    .binfile_name("conv2d_transpose_d.so") \
    .compute_cost(10) \
    .kernel_name("conv2d_transpose_d") \
    .partial_flag(True) \
    .attr("input_sizes", "required", "listInt", "all") \
    .attr("stride", "required", "listInt", "all") \
    .attr("pad_list", "required", "listInt", "all") \
    .attr("dilation", "optional", "listInt", "all", "1,1,1,1") \
    .attr("groups", "optional", "int", "all", "1") \
    .attr("format", "optional", "str", "all", "NHWC") \
    .attr("output_padding", "optional", "listInt", "all", "0,0,0,0") \
    .attr("offset_x", "optional", "int", "all", "0") \
    .input(0, "x", False, "required", "all") \
    .input(1, "filter", False, "required", "all") \
    .input(2, "bias", False, "optional", "all") \
    .input(3, "offset_w", False, "optional", "all") \
    .output(0, "y", True, "required", "all") \
    .dtype_format(DataType.F16_5HD, DataType.F16_FracZ, DataType.F16_Default, DataType.I8_Default, DataType.F16_5HD) \
    .dtype_format(DataType.F16_5HD, DataType.F16_FracZ, DataType.F32_Default, DataType.I8_Default, DataType.F32_5HD) \
    .dtype_format(DataType.I8_5HD, DataType.I8_FracZ, DataType.I32_Default, DataType.I8_Default, DataType.I32_5HD) \
    .get_op_info()


@op_info_register(conv2d_transpose_op_info)
def _conv2d_transpose_tbe():
    """Conv2DTranspose TBE register"""
    return
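Each .dtype_format(...) row lists the accepted type/format combination for (x, filter, bias, offset_w, y) in declaration order; for example, the first row takes a float16 5HD feature map, a float16 FracZ filter, and a float16 bias, and yields a float16 5HD output. The (x, filter, bias) input order is exactly what Process() above feeds the fused Conv2DTranspose node.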