fix DumpFusionScopeInfo method and optimize some vector copies in UB fusion

This commit is contained in:
yuchaojie 2021-12-22 16:46:05 +08:00
parent 40211aa412
commit fa2c034c25
41 changed files with 572 additions and 488 deletions

File diff suppressed because one or more lines are too long

View File

@ -14,9 +14,6 @@
* limitations under the License.
*/
#include "backend/optimizer/ascend/buffer_fusion/batchmatmul_dropoutdomaskv3_fusion_pass.h"
#include <vector>
#include <memory>
#include <string>
#include "backend/kernel_compiler/kernel_fusion.h"
#include "debug/anf_ir_dump.h"
#include "backend/session/anf_runtime_algorithm.h"
@ -42,7 +39,7 @@ void BatchMatmulDropoutDoMaskV3FusionPass::MatchBatchMatmulDropoutDoMaskV3(
void BatchMatmulDropoutDoMaskV3FusionPass::MatchSingleFusionPattern(const session::KernelGraph &kernel_graph,
FusedNodeRecord *candidate_fusion) {
MS_EXCEPTION_IF_NULL(candidate_fusion);
std::vector<AnfNodePtr> node_list = TopoSort(kernel_graph.get_return());
const auto &node_list = TopoSort(kernel_graph.get_return());
for (auto &node : node_list) {
if (!AnfUtils::IsRealCNodeKernel(node) || fusion_id_allocator->HasFusionIdAttr(node) ||
AnfAlgo::CheckPrimitiveType(node, prim::kPrimReturn)) {

View File

@ -16,8 +16,6 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_BATCHMATMUL_DROPOUTDOMASKV3_FUSION_PASS_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_BATCHMATMUL_DROPOUTDOMASKV3_FUSION_PASS_H_
#include <vector>
#include "utils/hash_set.h"
#include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
#include "ir/anf.h"
@ -29,8 +27,6 @@
namespace mindspore {
namespace opt {
using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
class BatchMatmulDropoutDoMaskV3FusionPass : public FusionBasePass {
public:
explicit BatchMatmulDropoutDoMaskV3FusionPass(FusionIdAllocatorPtr idAllocator)

View File

@ -39,7 +39,7 @@ void BatchMatmulFusedMulAddFusionPass::MatchBatchMatmulFusedMulAdd(const CNodePt
void BatchMatmulFusedMulAddFusionPass::MatchSingleFusionPattern(const session::KernelGraph &kernel_graph,
FusedNodeRecord *candidate_fusion) {
MS_EXCEPTION_IF_NULL(candidate_fusion);
std::vector<AnfNodePtr> node_list = TopoSort(kernel_graph.get_return());
const auto &node_list = TopoSort(kernel_graph.get_return());
for (auto &node : node_list) {
MS_EXCEPTION_IF_NULL(node);
if (!AnfUtils::IsRealCNodeKernel(node) || fusion_id_allocator->HasFusionIdAttr(node) ||

View File

@ -16,8 +16,6 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_BATCHMATMUL_FUSEDMULADD_FUSION_PASS_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_BATCHMATMUL_FUSEDMULADD_FUSION_PASS_H_
#include <vector>
#include "utils/hash_set.h"
#include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
#include "ir/anf.h"
@ -29,8 +27,6 @@
namespace mindspore {
namespace opt {
using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
class BatchMatmulFusedMulAddFusionPass : public FusionBasePass {
public:
explicit BatchMatmulFusedMulAddFusionPass(FusionIdAllocatorPtr idAllocator)

View File

@ -14,9 +14,6 @@
* limitations under the License.
*/
#include "backend/optimizer/ascend/buffer_fusion/bnupdate_eltwise_eltwise_fusion_pass.h"
#include <vector>
#include <memory>
#include <string>
#include "utils/hash_set.h"
#include "backend/kernel_compiler/kernel_fusion.h"
#include "debug/anf_ir_dump.h"
@ -55,7 +52,7 @@ void BnupdateEltwiseEltwiseFusionPass::MatchBnupdateAddRelu(const CNodePtr &cnod
void BnupdateEltwiseEltwiseFusionPass::MatchSingleFusionPattern(const session::KernelGraph &kernel_graph,
FusedNodeRecord *candidate_fusion) {
MS_EXCEPTION_IF_NULL(candidate_fusion);
std::vector<AnfNodePtr> node_list = TopoSort(kernel_graph.get_return());
const auto &node_list = TopoSort(kernel_graph.get_return());
for (auto &node : node_list) {
if (!AnfUtils::IsRealCNodeKernel(node) || fusion_id_allocator->HasFusionIdAttr(node) ||
AnfAlgo::CheckPrimitiveType(node, prim::kPrimReturn)) {

View File

@ -16,8 +16,6 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_BNUPDATE_ELTWISE_ELTWISE_FUSION_PASS_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_BNUPDATE_ELTWISE_ELTWISE_FUSION_PASS_H_
#include <vector>
#include "utils/hash_set.h"
#include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
#include "ir/anf.h"
@ -29,8 +27,6 @@
namespace mindspore {
namespace opt {
using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
class BnupdateEltwiseEltwiseFusionPass : public FusionBasePass {
public:
explicit BnupdateEltwiseEltwiseFusionPass(FusionIdAllocatorPtr idAllocator)

View File

@ -44,7 +44,7 @@ void BnupdateEltwiseFusionPass::MatchBnupdateDoubleOutputEltwise(const CNodePtr
void BnupdateEltwiseFusionPass::MatchSingleFusionPattern(const session::KernelGraph &kernel_graph,
FusedNodeRecord *candidate_fusion) {
MS_EXCEPTION_IF_NULL(candidate_fusion);
std::vector<AnfNodePtr> node_list = TopoSort(kernel_graph.get_return());
const auto &node_list = TopoSort(kernel_graph.get_return());
for (auto &node : node_list) {
if (!AnfUtils::IsRealCNodeKernel(node) || fusion_id_allocator->HasFusionIdAttr(node) ||
AnfAlgo::CheckPrimitiveType(node, prim::kPrimReturn)) {

View File

@ -16,8 +16,6 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_BNUPDATE_ELTWISE_FUSION_PASS_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_BNUPDATE_ELTWISE_FUSION_PASS_H_
#include <vector>
#include "utils/hash_set.h"
#include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
#include "ir/anf.h"
@ -29,8 +27,6 @@
namespace mindspore {
namespace opt {
using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
class BnupdateEltwiseFusionPass : public FusionBasePass {
public:
explicit BnupdateEltwiseFusionPass(FusionIdAllocatorPtr idAllocator)

View File

@ -66,7 +66,7 @@ void Conv2DBackpropEltwiseEltwiseFusionPass::MatchConv2DBackpropInputEltwiseEltw
void Conv2DBackpropEltwiseEltwiseFusionPass::MatchSingleFusionPattern(const session::KernelGraph &kernel_graph,
FusedNodeRecord *candidate_fusion) {
MS_EXCEPTION_IF_NULL(candidate_fusion);
std::vector<AnfNodePtr> node_list = TopoSort(kernel_graph.get_return());
const auto &node_list = TopoSort(kernel_graph.get_return());
for (auto &node : node_list) {
if (!AnfUtils::IsRealCNodeKernel(node) || fusion_id_allocator->HasFusionIdAttr(node) ||
AnfAlgo::CheckPrimitiveType(node, prim::kPrimReturn)) {

View File

@ -16,8 +16,6 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_CONV2DBACKPROP_ELTWISE_ELTWISE_FUSION_PASS_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_CONV2DBACKPROP_ELTWISE_ELTWISE_FUSION_PASS_H_
#include <vector>
#include "utils/hash_set.h"
#include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
#include "ir/anf.h"
@ -29,8 +27,6 @@
namespace mindspore {
namespace opt {
using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
class Conv2DBackpropEltwiseEltwiseFusionPass : public FusionBasePass {
public:
explicit Conv2DBackpropEltwiseEltwiseFusionPass(FusionIdAllocatorPtr idAllocator)

View File

@ -44,7 +44,7 @@ void Conv2DBackpropEltwiseFusionPass::MatchConv2DBackpropInputEltwise(const CNod
void Conv2DBackpropEltwiseFusionPass::MatchSingleFusionPattern(const session::KernelGraph &kernel_graph,
FusedNodeRecord *candidate_fusion) {
MS_EXCEPTION_IF_NULL(candidate_fusion);
std::vector<AnfNodePtr> node_list = TopoSort(kernel_graph.get_return());
const auto &node_list = TopoSort(kernel_graph.get_return());
for (auto &node : node_list) {
if (!AnfUtils::IsRealCNodeKernel(node) || fusion_id_allocator->HasFusionIdAttr(node) ||
AnfAlgo::CheckPrimitiveType(node, prim::kPrimReturn)) {

View File

@ -16,8 +16,6 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_CONV2DBACKPROP_ELTWISE_FUSION_PASS_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_CONV2DBACKPROP_ELTWISE_FUSION_PASS_H_
#include <vector>
#include "utils/hash_set.h"
#include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
#include "ir/anf.h"
@ -29,8 +27,6 @@
namespace mindspore {
namespace opt {
using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
class Conv2DBackpropEltwiseFusionPass : public FusionBasePass {
public:
explicit Conv2DBackpropEltwiseFusionPass(FusionIdAllocatorPtr idAllocator)

View File

@ -41,7 +41,7 @@ void ConvBnReduceFusionPass::MatchConvBnreduce(const CNodePtr &cnode, const sess
void ConvBnReduceFusionPass::MatchSingleFusionPattern(const session::KernelGraph &kernel_graph,
FusedNodeRecord *candidate_fusion) {
MS_EXCEPTION_IF_NULL(candidate_fusion);
std::vector<AnfNodePtr> node_list = TopoSort(kernel_graph.get_return());
const auto &node_list = TopoSort(kernel_graph.get_return());
for (auto &node : node_list) {
if (!AnfUtils::IsRealCNodeKernel(node) || fusion_id_allocator->HasFusionIdAttr(node) ||
AnfAlgo::CheckPrimitiveType(node, prim::kPrimReturn)) {

View File

@ -16,8 +16,6 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_CONV_BNREDUCE_FUSION_PASS_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_CONV_BNREDUCE_FUSION_PASS_H_
#include <vector>
#include "utils/hash_set.h"
#include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
#include "ir/anf.h"
@ -29,8 +27,6 @@
namespace mindspore {
namespace opt {
using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
class ConvBnReduceFusionPass : public FusionBasePass {
public:
explicit ConvBnReduceFusionPass(FusionIdAllocatorPtr idAllocator)

View File

@ -53,7 +53,7 @@ void ConvDoubleInFusionPass::MatchConvDoubleInEltwise(const CNodePtr &cnode, con
void ConvDoubleInFusionPass::MatchSingleFusionPattern(const session::KernelGraph &kernel_graph,
FusedNodeRecord *candidate_fusion) {
MS_EXCEPTION_IF_NULL(candidate_fusion);
std::vector<AnfNodePtr> node_list = TopoSort(kernel_graph.get_return());
const auto &node_list = TopoSort(kernel_graph.get_return());
for (auto &node : node_list) {
if (!AnfUtils::IsRealCNodeKernel(node) || fusion_id_allocator->HasFusionIdAttr(node) ||
AnfAlgo::CheckPrimitiveType(node, prim::kPrimReturn)) {

View File

@ -16,8 +16,6 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_CONV_DOUBLE_IN_FUSION_PASS_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_CONV_DOUBLE_IN_FUSION_PASS_H_
#include <vector>
#include "utils/hash_set.h"
#include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
#include "ir/anf.h"
@ -29,8 +27,6 @@
namespace mindspore {
namespace opt {
using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
class ConvDoubleInFusionPass : public FusionBasePass {
public:
explicit ConvDoubleInFusionPass(FusionIdAllocatorPtr idAllocator)

View File

@ -14,8 +14,6 @@
* limitations under the License.
*/
#include "backend/optimizer/ascend/buffer_fusion/conv_single_in_fusion_pass.h"
#include <vector>
#include <memory>
#include "utils/hash_set.h"
#include "backend/kernel_compiler/kernel_fusion.h"
#include "debug/anf_ir_dump.h"
@ -57,7 +55,7 @@ void ConvSingleInFusionPass::MatchConvSingleInEltwise(const CNodePtr &cnode, con
void ConvSingleInFusionPass::MatchSingleFusionPattern(const session::KernelGraph &kernel_graph,
FusedNodeRecord *candidate_fusion) {
MS_EXCEPTION_IF_NULL(candidate_fusion);
std::vector<AnfNodePtr> node_list = TopoSort(kernel_graph.get_return());
const auto &node_list = TopoSort(kernel_graph.get_return());
for (auto &node : node_list) {
if (!AnfUtils::IsRealCNodeKernel(node) || fusion_id_allocator->HasFusionIdAttr(node) ||
AnfAlgo::CheckPrimitiveType(node, prim::kPrimReturn)) {

View File

@ -16,8 +16,6 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_CONV_SINGLE_IN_FUSION_PASS_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_CONV_SINGLE_IN_FUSION_PASS_H_
#include <vector>
#include "utils/hash_set.h"
#include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
#include "ir/anf.h"
@ -29,8 +27,6 @@
namespace mindspore {
namespace opt {
using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
class ConvSingleInFusionPass : public FusionBasePass {
public:
explicit ConvSingleInFusionPass(FusionIdAllocatorPtr idAllocator)

View File

@ -52,7 +52,7 @@ void DepthwiseConvEltwiseFusionPass::MatchDepthwiseConvRelu(const CNodePtr &cnod
void DepthwiseConvEltwiseFusionPass::MatchSingleFusionPattern(const session::KernelGraph &kernel_graph,
FusedNodeRecord *candidate_fusion) {
MS_EXCEPTION_IF_NULL(candidate_fusion);
std::vector<AnfNodePtr> node_list = TopoSort(kernel_graph.get_return());
const auto &node_list = TopoSort(kernel_graph.get_return());
for (auto &node : node_list) {
if (!AnfUtils::IsRealCNodeKernel(node) || fusion_id_allocator->HasFusionIdAttr(node) ||
AnfAlgo::CheckPrimitiveType(node, prim::kPrimReturn)) {

View File

@ -16,8 +16,6 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_DEPTHWISECONV_ELTWISE_FUSION_PASS_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_DEPTHWISECONV_ELTWISE_FUSION_PASS_H_
#include <vector>
#include "utils/hash_set.h"
#include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
#include "ir/anf.h"
@ -29,8 +27,6 @@
namespace mindspore {
namespace opt {
using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
class DepthwiseConvEltwiseFusionPass : public FusionBasePass {
public:
explicit DepthwiseConvEltwiseFusionPass(FusionIdAllocatorPtr idAllocator)

View File

@ -14,6 +14,7 @@
* limitations under the License.
*/
#include "backend/optimizer/ascend/buffer_fusion/eltwise_fusion_pass.h"
#include <vector>
#include "backend/kernel_compiler/kernel_fusion.h"
#include "backend/session/anf_runtime_algorithm.h"
#include "base/core_ops.h"

View File

@ -16,8 +16,6 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_ELTWISE_FUSION_PASS_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_ELTWISE_FUSION_PASS_H_
#include <vector>
#include "utils/hash_set.h"
#include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
#include "ir/anf.h"
@ -29,8 +27,6 @@
namespace mindspore {
namespace opt {
using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
class EltwiseFusionPass : public FusionBasePass {
public:
explicit EltwiseFusionPass(FusionIdAllocatorPtr idAllocator) : FusionBasePass("EltwiseFusionPass", idAllocator) {

View File

@ -40,7 +40,7 @@ void MatmulConfusionTranposeFusionPass::MatchMatmulConfusionTranpose(const CNode
void MatmulConfusionTranposeFusionPass::MatchSingleFusionPattern(const session::KernelGraph &kernel_graph,
FusedNodeRecord *candidate_fusion) {
MS_EXCEPTION_IF_NULL(candidate_fusion);
std::vector<AnfNodePtr> node_list = TopoSort(kernel_graph.get_return());
const auto &node_list = TopoSort(kernel_graph.get_return());
for (auto &node : node_list) {
if (!AnfUtils::IsRealCNodeKernel(node) || fusion_id_allocator->HasFusionIdAttr(node) ||
AnfAlgo::CheckPrimitiveType(node, prim::kPrimReturn)) {

View File

@ -16,8 +16,6 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_MATMUL_CONFUSIONTRANSPOSE_FUSION_PASS_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_MATMUL_CONFUSIONTRANSPOSE_FUSION_PASS_H_
#include <vector>
#include "utils/hash_set.h"
#include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
#include "ir/anf.h"
@ -29,8 +27,6 @@
namespace mindspore {
namespace opt {
using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
class MatmulConfusionTranposeFusionPass : public FusionBasePass {
public:
explicit MatmulConfusionTranposeFusionPass(FusionIdAllocatorPtr idAllocator)

View File

@ -14,9 +14,6 @@
* limitations under the License.
*/
#include "backend/optimizer/ascend/buffer_fusion/matmul_dropoutdomaskv3_add_fusion_pass.h"
#include <vector>
#include <memory>
#include <string>
#include "backend/kernel_compiler/kernel_fusion.h"
#include "debug/anf_ir_dump.h"
#include "backend/session/anf_runtime_algorithm.h"
@ -51,7 +48,7 @@ void MatmulDropoutDoMaskV3AddFusionPass::MatchMatmulDropoutDoMaskV3Add(const CNo
void MatmulDropoutDoMaskV3AddFusionPass::MatchSingleFusionPattern(const session::KernelGraph &kernel_graph,
FusedNodeRecord *candidate_fusion) {
MS_EXCEPTION_IF_NULL(candidate_fusion);
std::vector<AnfNodePtr> node_list = TopoSort(kernel_graph.get_return());
const auto &node_list = TopoSort(kernel_graph.get_return());
for (auto &node : node_list) {
if (!AnfUtils::IsRealCNodeKernel(node) || fusion_id_allocator->HasFusionIdAttr(node) ||
AnfAlgo::CheckPrimitiveType(node, prim::kPrimReturn)) {

View File

@ -16,8 +16,6 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_MATMUL_DROPOUTDOMASKV3_ADD_FUSION_PASS_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_MATMUL_DROPOUTDOMASKV3_ADD_FUSION_PASS_H_
#include <vector>
#include "utils/hash_set.h"
#include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
#include "ir/anf.h"
@ -29,8 +27,6 @@
namespace mindspore {
namespace opt {
using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
class MatmulDropoutDoMaskV3AddFusionPass : public FusionBasePass {
public:
explicit MatmulDropoutDoMaskV3AddFusionPass(FusionIdAllocatorPtr idAllocator)

View File

@ -39,7 +39,7 @@ void MatmulEltwiseFusionPass::MatchSingleFusionPattern(const session::KernelGrap
FusedNodeRecord *candidate_fusion) {
MS_EXCEPTION_IF_NULL(candidate_fusion);
std::vector<AnfNodePtr> node_list = TopoSort(kernel_graph.get_return());
const auto &node_list = TopoSort(kernel_graph.get_return());
for (auto &node : node_list) {
if (!AnfUtils::IsRealCNodeKernel(node) || fusion_id_allocator->HasFusionIdAttr(node) ||
AnfAlgo::CheckPrimitiveType(node, prim::kPrimReturn)) {

View File

@ -16,8 +16,6 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_MATMUL_ELTWISE_FUSION_PASS_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_MATMUL_ELTWISE_FUSION_PASS_H_
#include <vector>
#include "utils/hash_set.h"
#include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
#include "ir/anf.h"
@ -29,8 +27,6 @@
namespace mindspore {
namespace opt {
using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
class MatmulEltwiseFusionPass : public FusionBasePass {
public:
explicit MatmulEltwiseFusionPass(FusionIdAllocatorPtr idAllocator)

View File

@ -14,6 +14,7 @@
* limitations under the License.
*/
#include "backend/optimizer/ascend/buffer_fusion/multi_output_fusion_pass.h"
#include <vector>
#include "backend/kernel_compiler/kernel_fusion.h"
#include "backend/session/anf_runtime_algorithm.h"
#include "base/core_ops.h"

View File

@ -16,8 +16,6 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_MULTI_OUTPUT_FUSION_PASS_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_PASS_MULTI_OUTPUT_FUSION_PASS_H_
#include <vector>
#include "utils/hash_set.h"
#include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
#include "ir/anf.h"
@ -29,8 +27,6 @@
namespace mindspore {
namespace opt {
using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
class MultiOutputFusionPass : public FusionBasePass {
public:
explicit MultiOutputFusionPass(FusionIdAllocatorPtr idAllocator)

View File

@ -14,6 +14,7 @@
* limitations under the License.
*/
#include "backend/optimizer/ascend/buffer_fusion/reduce_eltwise_fusion_pass.h"
#include <vector>
#include "backend/kernel_compiler/kernel_fusion.h"
#include "debug/anf_ir_dump.h"
#include "backend/session/anf_runtime_algorithm.h"

View File

@ -16,8 +16,6 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_REDUCE_ELTWISE_FUSION_PASS_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_REDUCE_ELTWISE_FUSION_PASS_H_
#include <vector>
#include "utils/hash_set.h"
#include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
#include "ir/anf.h"
@ -29,8 +27,6 @@
namespace mindspore {
namespace opt {
using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
class ReduceEltwiseFusionPass : public FusionBasePass {
public:
explicit ReduceEltwiseFusionPass(FusionIdAllocatorPtr idAllocator)

View File

@ -14,6 +14,7 @@
* limitations under the License.
*/
#include "backend/optimizer/ascend/buffer_fusion/segment_eltwise_fusion_pass.h"
#include <vector>
#include "backend/kernel_compiler/kernel_fusion.h"
#include "backend/session/anf_runtime_algorithm.h"
#include "base/core_ops.h"

View File

@ -16,8 +16,6 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_SEGMENT_ELTWISE_FUSION_PASS_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_SEGMENT_ELTWISE_FUSION_PASS_H_
#include <vector>
#include "utils/hash_set.h"
#include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
#include "ir/anf.h"
@ -29,8 +27,6 @@
namespace mindspore {
namespace opt {
using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
class SegmentEltwiseFusionPass : public FusionBasePass {
public:
explicit SegmentEltwiseFusionPass(FusionIdAllocatorPtr idAllocator)

View File

@ -64,7 +64,7 @@ void StridedReadConvStridedWriteFusionPass::MatchStridedReadConvStridedWrite(con
void StridedReadConvStridedWriteFusionPass::MatchSingleFusionPattern(const session::KernelGraph &kernel_graph,
FusedNodeRecord *candidate_fusion) {
MS_EXCEPTION_IF_NULL(candidate_fusion);
std::vector<AnfNodePtr> node_list = TopoSort(kernel_graph.get_return());
const auto &node_list = TopoSort(kernel_graph.get_return());
for (auto &node : node_list) {
if (!AnfUtils::IsRealCNodeKernel(node) || fusion_id_allocator->HasFusionIdAttr(node) ||
AnfAlgo::CheckPrimitiveType(node, prim::kPrimReturn)) {

View File

@ -16,8 +16,6 @@
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_STRIDEDREAD_CONV_STRIDEDWRITE_FUSION_PASS_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_ASCEND_BUFFER_FUSION_STRIDEDREAD_CONV_STRIDEDWRITE_FUSION_PASS_H_
#include <vector>
#include "utils/hash_set.h"
#include "backend/optimizer/ascend/buffer_fusion/fusion_base_pass.h"
#include "ir/anf.h"
@ -29,8 +27,6 @@
namespace mindspore {
namespace opt {
using FusedNodeRecord = std::vector<mindspore::HashSet<AnfNodePtr>>;
class StridedReadConvStridedWriteFusionPass : public FusionBasePass {
public:
explicit StridedReadConvStridedWriteFusionPass(FusionIdAllocatorPtr idAllocator)

View File

@ -20,6 +20,7 @@
#include <memory>
#include <string>
#include "utils/hash_map.h"
#include "backend/kernel_compiler/common_utils.h"
#include "backend/kernel_compiler/tbe/tbe_kernel_compile.h"
#include "backend/kernel_compiler/tbe/tbe_utils.h"
#include "debug/anf_ir_dump.h"
@ -50,7 +51,7 @@ void DumpFusionScopeInfo(const kernel::FusionScopeInfo &info) {
}
for (auto &node : info.compute_nodes) {
MS_LOG(INFO) << "=== Compute: (" << node->DebugString() << ")-("
<< mindspore::kekernel::tbe::GetFusionTypeName(AnfAlgo::GetFusionType(node)) << ")";
<< mindspore::kernel::GetFusionNameByType(AnfAlgo::GetFusionType(node)) << ")";
}
MS_LOG(INFO) << "=== Dump FusionScopeInfo end";
}

View File

@ -137,6 +137,7 @@ bool UnVisited(const BaseRef &n);
bool Visited(const BaseRef &n);
// Create new cnode with dump flag and trace info maintained
CNodePtr NewCNode(const std::vector<AnfNodePtr> &inputs, const FuncGraphPtr &fg,
const std::vector<AnfNodePtr> &orig_nodes);
@ -218,7 +219,7 @@ bool GetBoolAttr(const AnfNodePtr &node, const std::string &attr_name);
// Check node's data type is in supported data type set
bool CheckSupportDataType(const AnfNodePtr &node, const std::set<TypeId> &supported_data_type_set);
// Create a new value node of func graph,not kernel graph
// Create a new value node of func graph, not kernel graph
ValueNodePtr MakeValueNode(const ValueNodePtr &value_node);
// Transfer depend or updatestate to the new node

View File

@ -13,7 +13,7 @@
# limitations under the License.
# ============================================================================
"""InitDataSetQueue op"""
"""DropoutGenMask op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType
dropout_genmask_op_info = AiCPURegOp("DropoutGenMask") \
@ -28,5 +28,5 @@ dropout_genmask_op_info = AiCPURegOp("DropoutGenMask") \
@op_info_register(dropout_genmask_op_info)
def _dropout_genmask_aicpu():
"""Dropout AiCPU register"""
"""DropoutGenMask AiCPU register"""
return

View File

@ -13,7 +13,7 @@
# limitations under the License.
# ============================================================================
"""InitDataSetQueue op"""
"""DropoutGenMaskV3 op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType
dropout_genmask_v3_op_info = AiCPURegOp("DropoutGenMaskV3") \
@ -28,5 +28,5 @@ dropout_genmask_v3_op_info = AiCPURegOp("DropoutGenMaskV3") \
@op_info_register(dropout_genmask_v3_op_info)
def _dropout_genmask_v3_aicpu():
"""Dropout AiCPU register"""
"""DropoutGenMaskV3 AiCPU register"""
return