!33094 add TensorScatter op fission

Merge pull request !33094 from yuchaojie/ir_fusion
This commit is contained in:
i-robot 2022-04-19 06:18:38 +00:00 committed by Gitee
commit 19416c19f7
No known key found for this signature in database
GPG Key ID: 173E9B9CA92EEF8F
7 changed files with 222 additions and 113 deletions

View File

@ -27,7 +27,6 @@
#include "plugin/device/ascend/optimizer/ir_fission/batch_norm_grad_split.h"
#include "plugin/device/ascend/optimizer/ir_fission/batch_norm_bert_fission.h"
#include "plugin/device/ascend/optimizer/ir_fission/single_batch_norm_fission.h"
#include "plugin/device/ascend/optimizer/ir_fission/tensor_scatter_update_fission.h"
#include "plugin/device/ascend/optimizer/ir_fission/reduce_min_fission.h"
#include "plugin/device/ascend/optimizer/ir_fusion/fused_batch_norm_fusion.h"
#include "plugin/device/ascend/optimizer/ir_fission/layer_norm_grad_split.h"
@ -38,6 +37,7 @@
#include "plugin/device/ascend/optimizer/ir_fission/reduce_sum_fission.h"
#include "plugin/device/ascend/optimizer/ir_fission/cdist_fission.h"
#include "plugin/device/ascend/optimizer/ir_fission/seed_adapter.h"
#include "plugin/device/ascend/optimizer/ir_fission/tensor_scatter_fission.h"
#include "backend/common/pass/communication_op_fusion.h"
#include "plugin/device/ascend/optimizer/ir_fusion/square_sum_fusion.h"
#include "plugin/device/ascend/optimizer/ir_fusion/clip_by_norm_no_div_square_sum_fusion.h"
@ -233,6 +233,10 @@ void AddAscendIRFusionPass(PassManager *ir_fusion_pm) {
ir_fusion_pm->AddPass(std::make_shared<SplitVFission>());
ir_fusion_pm->AddPass(std::make_shared<SpaceToDepthSplit>());
ir_fusion_pm->AddPass(std::make_shared<TensorScatterUpdateFission>());
ir_fusion_pm->AddPass(std::make_shared<TensorScatterAddFission>());
ir_fusion_pm->AddPass(std::make_shared<TensorScatterSubFission>());
ir_fusion_pm->AddPass(std::make_shared<TensorScatterMaxFission>());
ir_fusion_pm->AddPass(std::make_shared<TensorScatterMinFission>());
ir_fusion_pm->AddPass(std::make_shared<GetitemTuple>());
ir_fusion_pm->AddPass(std::make_shared<PackFission>());
ir_fusion_pm->AddPass(std::make_shared<ConcatFission>());
@ -400,6 +404,10 @@ void RunOpAscendBackendIRFusionOptimization(const std::shared_ptr<session::Kerne
ir_fusion_pm->AddPass(std::make_shared<AddnFission>());
ir_fusion_pm->AddPass(std::make_shared<InsertPadForNMSWithMask>());
ir_fusion_pm->AddPass(std::make_shared<TensorScatterUpdateFission>());
ir_fusion_pm->AddPass(std::make_shared<TensorScatterAddFission>());
ir_fusion_pm->AddPass(std::make_shared<TensorScatterSubFission>());
ir_fusion_pm->AddPass(std::make_shared<TensorScatterMaxFission>());
ir_fusion_pm->AddPass(std::make_shared<TensorScatterMinFission>());
ir_fusion_pm->AddPass(std::make_shared<EraseVisitAttr>());
ir_fusion_pm->AddPass(std::make_shared<BroadcasttoFission>());
ir_fusion_pm->AddPass(std::make_shared<ReduceSumFission>());

View File

@ -0,0 +1,111 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/ascend/optimizer/ir_fission/tensor_scatter_fission.h"
#include <vector>
#include <memory>
#include "backend/common/session/anf_runtime_algorithm.h"
#include "include/common/utils/anfalgo.h"
#include "include/common/utils/utils.h"
#include "ops/core_ops.h"
#include "backend/common/optimizer/helper.h"
#include "utils/trace_base.h"
namespace mindspore {
namespace opt {
namespace {
constexpr size_t kTensorScatterInputSize = 3;
} // namespace
const AnfNodePtr TensorScatterFission::Process(const FuncGraphPtr &graph, const AnfNodePtr &node,
                                               const EquivPtr &) const {
  MS_EXCEPTION_IF_NULL(graph);
  MS_EXCEPTION_IF_NULL(node);
  auto cnode = CheckAnfNodeIfCNodeAndInputSize(node, kTensorScatterInputSize);
  // Null-check the data input BEFORE using it: the original code dereferenced
  // cnode->input(kIndex1) to build the TensorMove inputs and only checked it
  // afterwards, which made the check useless.
  auto data_input = cnode->input(kIndex1);
  MS_EXCEPTION_IF_NULL(data_input);
  // Create a TensorMove node that copies the data input, so the in-place
  // ScatterNd* op below can mutate the copy without touching the original.
  auto tensor_move_inputs =
    std::vector<AnfNodePtr>{NewValueNode(std::make_shared<Primitive>(kTensorMoveOpName)), data_input};
  auto tensor_move = NewCNode(tensor_move_inputs, graph);
  MS_EXCEPTION_IF_NULL(tensor_move);
  tensor_move->set_scope(node->scope());
  // The TensorMove produces a copy of the data input, so it inherits that
  // input's abstract (shape/type), not the TensorScatter* node's.
  tensor_move->set_abstract(data_input->abstract());
  // Create the ScatterNd* node; the concrete primitive (Update/Add/Sub/Max/Min)
  // is chosen by the subclass via GetScatterNdPrimNode().
  auto scatter_nd_inputs =
    std::vector<AnfNodePtr>{GetScatterNdPrimNode(), tensor_move, cnode->input(kIndex2), cnode->input(kIndex3)};
  auto scatter_nd_node = NewCNode(scatter_nd_inputs, graph);
  MS_EXCEPTION_IF_NULL(scatter_nd_node);
  scatter_nd_node->set_scope(node->scope());
  scatter_nd_node->set_abstract(node->abstract());
  common::AnfAlgo::SetNodeAttr(kAttrUseLocking, MakeValue(false), scatter_nd_node);
  return scatter_nd_node;
}
// Target primitive for the fission: ScatterNdUpdate.
ValueNodePtr TensorScatterUpdateFission::GetScatterNdPrimNode() const {
  auto prim = std::make_shared<Primitive>(prim::kPrimScatterNdUpdate->name());
  return NewValueNode(prim);
}

// Matches TensorScatterUpdate(input, indices, updates) — three wildcard inputs.
const BaseRef TensorScatterUpdateFission::DefinePattern() const {
  return VectorRef({prim::kPrimTensorScatterUpdate, std::make_shared<Var>(), std::make_shared<Var>(),
                    std::make_shared<Var>()});
}
// Target primitive for the fission: ScatterNdAdd.
ValueNodePtr TensorScatterAddFission::GetScatterNdPrimNode() const {
  auto prim = std::make_shared<Primitive>(prim::kPrimScatterNdAdd->name());
  return NewValueNode(prim);
}

// Matches TensorScatterAdd(input, indices, updates) — three wildcard inputs.
const BaseRef TensorScatterAddFission::DefinePattern() const {
  return VectorRef({prim::kPrimTensorScatterAdd, std::make_shared<Var>(), std::make_shared<Var>(),
                    std::make_shared<Var>()});
}
// Target primitive for the fission: ScatterNdSub.
ValueNodePtr TensorScatterSubFission::GetScatterNdPrimNode() const {
  auto prim = std::make_shared<Primitive>(prim::kPrimScatterNdSub->name());
  return NewValueNode(prim);
}

// Matches TensorScatterSub(input, indices, updates) — three wildcard inputs.
const BaseRef TensorScatterSubFission::DefinePattern() const {
  return VectorRef({prim::kPrimTensorScatterSub, std::make_shared<Var>(), std::make_shared<Var>(),
                    std::make_shared<Var>()});
}
// Target primitive for the fission: ScatterNdMax.
ValueNodePtr TensorScatterMaxFission::GetScatterNdPrimNode() const {
  auto prim = std::make_shared<Primitive>(prim::kPrimScatterNdMax->name());
  return NewValueNode(prim);
}

// Matches TensorScatterMax(input, indices, updates) — three wildcard inputs.
const BaseRef TensorScatterMaxFission::DefinePattern() const {
  return VectorRef({prim::kPrimTensorScatterMax, std::make_shared<Var>(), std::make_shared<Var>(),
                    std::make_shared<Var>()});
}
// Target primitive for the fission: ScatterNdMin.
ValueNodePtr TensorScatterMinFission::GetScatterNdPrimNode() const {
  auto prim = std::make_shared<Primitive>(prim::kPrimScatterNdMin->name());
  return NewValueNode(prim);
}

// Matches TensorScatterMin(input, indices, updates) — three wildcard inputs.
const BaseRef TensorScatterMinFission::DefinePattern() const {
  return VectorRef({prim::kPrimTensorScatterMin, std::make_shared<Var>(), std::make_shared<Var>(),
                    std::make_shared<Var>()});
}
} // namespace opt
} // namespace mindspore

View File

@ -0,0 +1,94 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_PLUGIN_DEVICE_ASCEND_OPTIMIZER_IR_FUSION_TENSOR_SCATTER_FISSION_H_
#define MINDSPORE_CCSRC_PLUGIN_DEVICE_ASCEND_OPTIMIZER_IR_FUSION_TENSOR_SCATTER_FISSION_H_
#include <string>
#include "backend/common/optimizer/pass.h"
#include "ir/func_graph.h"
#include "ir/anf.h"
#include "backend/common/optimizer/optimizer.h"
namespace mindspore {
namespace opt {
// Abstract base pass that splits a functional TensorScatter* op into
// TensorMove (copy of the data input) followed by an in-place ScatterNd* op.
// Subclasses provide the pattern to match and the concrete ScatterNd primitive.
class TensorScatterFission : public PatternProcessPass {
public:
explicit TensorScatterFission(bool multigraph = true, const string &name = "tensor_scatter_fission")
: PatternProcessPass(name, multigraph) {}
~TensorScatterFission() override = default;
// Rewrites the matched TensorScatter* node; returns the replacement ScatterNd* node.
const AnfNodePtr Process(const FuncGraphPtr &func_graph, const AnfNodePtr &node, const EquivPtr &) const override;
protected:
// Returns a ValueNode holding the ScatterNd* primitive this subclass targets.
virtual ValueNodePtr GetScatterNdPrimNode() const = 0;
};
// Splits TensorScatterUpdate into TensorMove + ScatterNdUpdate.
class TensorScatterUpdateFission : public TensorScatterFission {
public:
explicit TensorScatterUpdateFission(bool multigraph = true, const string &name = "tensor_scatter_update_fission")
: TensorScatterFission(multigraph, name) {}
~TensorScatterUpdateFission() override = default;
const BaseRef DefinePattern() const override;
protected:
ValueNodePtr GetScatterNdPrimNode() const override;
};
// Splits TensorScatterAdd into TensorMove + ScatterNdAdd.
class TensorScatterAddFission : public TensorScatterFission {
public:
explicit TensorScatterAddFission(bool multigraph = true, const string &name = "tensor_scatter_add_fission")
: TensorScatterFission(multigraph, name) {}
~TensorScatterAddFission() override = default;
const BaseRef DefinePattern() const override;
protected:
ValueNodePtr GetScatterNdPrimNode() const override;
};
// Splits TensorScatterSub into TensorMove + ScatterNdSub.
class TensorScatterSubFission : public TensorScatterFission {
public:
explicit TensorScatterSubFission(bool multigraph = true, const string &name = "tensor_scatter_sub_fission")
: TensorScatterFission(multigraph, name) {}
~TensorScatterSubFission() override = default;
const BaseRef DefinePattern() const override;
protected:
ValueNodePtr GetScatterNdPrimNode() const override;
};
// Splits TensorScatterMax into TensorMove + ScatterNdMax.
class TensorScatterMaxFission : public TensorScatterFission {
public:
explicit TensorScatterMaxFission(bool multigraph = true, const string &name = "tensor_scatter_max_fission")
: TensorScatterFission(multigraph, name) {}
~TensorScatterMaxFission() override = default;
const BaseRef DefinePattern() const override;
protected:
ValueNodePtr GetScatterNdPrimNode() const override;
};
// Splits TensorScatterMin into TensorMove + ScatterNdMin.
class TensorScatterMinFission : public TensorScatterFission {
public:
explicit TensorScatterMinFission(bool multigraph = true, const string &name = "tensor_scatter_min_fission")
: TensorScatterFission(multigraph, name) {}
~TensorScatterMinFission() override = default;
const BaseRef DefinePattern() const override;
protected:
ValueNodePtr GetScatterNdPrimNode() const override;
};
} // namespace opt
} // namespace mindspore
#endif // MINDSPORE_CCSRC_PLUGIN_DEVICE_ASCEND_OPTIMIZER_IR_FUSION_TENSOR_SCATTER_FISSION_H_

View File

@ -1,73 +0,0 @@
/**
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/ascend/optimizer/ir_fission/tensor_scatter_update_fission.h"
#include <vector>
#include <memory>
#include "backend/common/session/anf_runtime_algorithm.h"
#include "include/common/utils/anfalgo.h"
#include "backend/common/optimizer/helper.h"
namespace mindspore {
namespace opt {
// Creates a TensorMove node that copies the first input of the
// TensorScatterUpdate node, so the subsequent ScatterNdUpdate can mutate the
// copy in place.
CNodePtr TensorScatterUpdateFission::CreateTensorMove(const FuncGraphPtr &graph,
                                                      const CNodePtr &tensor_scatter_update) const {
  MS_EXCEPTION_IF_NULL(graph);
  MS_EXCEPTION_IF_NULL(tensor_scatter_update);
  auto data_input = tensor_scatter_update->input(1);
  MS_EXCEPTION_IF_NULL(data_input);
  std::vector<AnfNodePtr> inputs = {NewValueNode(std::make_shared<Primitive>(kTensorMoveOpName)), data_input};
  auto tensor_move = NewCNode(inputs, graph);
  MS_EXCEPTION_IF_NULL(tensor_move);
  tensor_move->set_scope(tensor_scatter_update->scope());
  // BUG FIX: the TensorMove outputs a copy of input(1), so its abstract
  // (shape/type) must come from that input — not from the whole
  // TensorScatterUpdate node, whose abstract describes the scatter result.
  tensor_move->set_abstract(data_input->abstract());
  // NOTE(review): use_locking looks like a ScatterNdUpdate attribute rather
  // than a TensorMove one — kept here to preserve existing behavior; confirm.
  common::AnfAlgo::SetNodeAttr(kAttrUseLocking, MakeValue(false), tensor_move);
  return tensor_move;
}
// Creates the ScatterNdUpdate node: it writes `updates` at `indices` into the
// tensor produced by `tensor_move`, inheriting the original node's scope and
// output abstract.
CNodePtr TensorScatterUpdateFission::CreateScatterNdUpdate(const FuncGraphPtr &graph,
                                                           const CNodePtr &tensor_scatter_update,
                                                           const CNodePtr &tensor_move) const {
  MS_EXCEPTION_IF_NULL(graph);
  MS_EXCEPTION_IF_NULL(tensor_scatter_update);
  MS_EXCEPTION_IF_NULL(tensor_move);
  auto prim_node = NewValueNode(std::make_shared<Primitive>(kScatterNdUpdateOpName));
  std::vector<AnfNodePtr> scatter_inputs = {prim_node, tensor_move, tensor_scatter_update->input(2),
                                            tensor_scatter_update->input(3)};
  auto scatter_nd_update = NewCNode(scatter_inputs, graph);
  MS_EXCEPTION_IF_NULL(scatter_nd_update);
  scatter_nd_update->set_scope(tensor_scatter_update->scope());
  scatter_nd_update->set_abstract(tensor_scatter_update->abstract());
  return scatter_nd_update;
}
// Matches a TensorScatterUpdate node with any number of inputs (SeqVar).
const BaseRef TensorScatterUpdateFission::DefinePattern() const {
  return VectorRef({std::make_shared<Primitive>(kTensorScatterUpdateOpName), std::make_shared<SeqVar>()});
}
// Rewrites TensorScatterUpdate(input, indices, updates) into
// ScatterNdUpdate(TensorMove(input), indices, updates).
const AnfNodePtr TensorScatterUpdateFission::Process(const FuncGraphPtr &func_graph, const AnfNodePtr &node,
                                                     const EquivPtr &) const {
  MS_EXCEPTION_IF_NULL(func_graph);
  MS_EXCEPTION_IF_NULL(node);
  // Expected node size: primitive + input + indices + updates.
  constexpr size_t kExpectedInputNum = 4;
  auto tensor_scatter_update = node->cast<CNodePtr>();
  if (tensor_scatter_update == nullptr || tensor_scatter_update->size() != kExpectedInputNum) {
    return nullptr;  // not the shape we can rewrite — leave the node untouched
  }
  auto tensor_move = CreateTensorMove(func_graph, tensor_scatter_update);
  return CreateScatterNdUpdate(func_graph, tensor_scatter_update, tensor_move);
}
} // namespace opt
} // namespace mindspore

View File

@ -1,38 +0,0 @@
/**
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_PRE_ACTIVATE_ASCEND_IR_FISSION_TENSOR_SCATTER_UPDATE_FISSION_H_
#define MINDSPORE_CCSRC_PRE_ACTIVATE_ASCEND_IR_FISSION_TENSOR_SCATTER_UPDATE_FISSION_H_
#include "backend/common/optimizer/optimizer.h"
namespace mindspore {
namespace opt {
// Pass that splits TensorScatterUpdate into TensorMove + ScatterNdUpdate,
// since the functional op has no direct in-place Ascend kernel.
class TensorScatterUpdateFission : public PatternProcessPass {
public:
explicit TensorScatterUpdateFission(bool multigraph = true)
: PatternProcessPass("tensor_scatter_update_fission", multigraph) {}
~TensorScatterUpdateFission() override = default;
const BaseRef DefinePattern() const override;
const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override;
private:
// Builds the TensorMove node that copies the scatter target tensor.
CNodePtr CreateTensorMove(const FuncGraphPtr &graph, const CNodePtr &tensor_scatter_update) const;
// Builds the ScatterNdUpdate node that consumes the TensorMove output.
CNodePtr CreateScatterNdUpdate(const FuncGraphPtr &graph, const CNodePtr &tensor_scatter_update,
const CNodePtr &tensor_move) const;
};
} // namespace opt
} // namespace mindspore
#endif // MINDSPORE_CCSRC_PRE_ACTIVATE_ASCEND_IR_FISSION_TENSOR_SCATTER_UPDATE_FISSION_H_

View File

@ -346,9 +346,16 @@ GVAR_DEF(PrimitivePtr, kPrimScatterMax, std::make_shared<Primitive>("ScatterMax"
GVAR_DEF(PrimitivePtr, kPrimScatterMin, std::make_shared<Primitive>("ScatterMin"));
// In-place ScatterNd* primitives (targets of the TensorScatter* fission passes).
GVAR_DEF(PrimitivePtr, kPrimScatterNdAdd, std::make_shared<Primitive>("ScatterNdAdd"));
GVAR_DEF(PrimitivePtr, kPrimScatterNdSub, std::make_shared<Primitive>("ScatterNdSub"));
GVAR_DEF(PrimitivePtr, kPrimScatterNdMax, std::make_shared<Primitive>("ScatterNdMax"));
GVAR_DEF(PrimitivePtr, kPrimScatterNdMin, std::make_shared<Primitive>("ScatterNdMin"));
GVAR_DEF(PrimitivePtr, kPrimScatterNdMul, std::make_shared<Primitive>("ScatterNdMul"));
GVAR_DEF(PrimitivePtr, kPrimScatterUpdate, std::make_shared<Primitive>("ScatterUpdate"));
GVAR_DEF(PrimitivePtr, kPrimScatterElements, std::make_shared<Primitive>("ScatterElements"));
// Functional (non-mutating) TensorScatter* primitives matched by the fission passes.
GVAR_DEF(PrimitivePtr, kPrimTensorScatterUpdate, std::make_shared<Primitive>("TensorScatterUpdate"));
GVAR_DEF(PrimitivePtr, kPrimTensorScatterAdd, std::make_shared<Primitive>("TensorScatterAdd"));
GVAR_DEF(PrimitivePtr, kPrimTensorScatterSub, std::make_shared<Primitive>("TensorScatterSub"));
GVAR_DEF(PrimitivePtr, kPrimTensorScatterMax, std::make_shared<Primitive>("TensorScatterMax"));
GVAR_DEF(PrimitivePtr, kPrimTensorScatterMin, std::make_shared<Primitive>("TensorScatterMin"));
GVAR_DEF(PrimitivePtr, kPrimTensorCopySlices, std::make_shared<Primitive>("TensorCopySlices"));
GVAR_DEF(PrimitivePtr, kPrimMapUniform, std::make_shared<Primitive>("MapUniform"));
GVAR_DEF(PrimitivePtr, kPrimSplit, std::make_shared<Primitive>("Split"));

View File

@ -16,7 +16,7 @@
#include "common/backend_common_test.h"
#include "common/py_func_graph_fetcher.h"
#include "plugin/device/ascend/optimizer/ir_fission/tensor_scatter_update_fission.h"
#include "plugin/device/ascend/optimizer/ir_fission/tensor_scatter_fission.h"
#include "include/common/debug/anf_ir_dump.h"
namespace mindspore {