!7440 add CSE for GraphKernel

Merge pull request !7440 from lingyunli63/GraphKernelCSE
This commit is contained in:
mindspore-ci-bot 2020-10-20 19:44:43 +08:00 committed by Gitee
commit 2744bad8b9
9 changed files with 245 additions and 56 deletions

View File

@ -14,30 +14,31 @@
* limitations under the License. * limitations under the License.
*/ */
#include "backend/kernel_compiler/akg/akg_kernel_json_decoder.h" #include "backend/kernel_compiler/akg/akg_kernel_json_decoder.h"
#include <string>
#include <memory>
#include <vector>
#include <sstream>
#include <algorithm> #include <algorithm>
#include <memory>
#include <sstream>
#include <string>
#include <unordered_map> #include <unordered_map>
#include <unordered_set> #include <unordered_set>
#include <vector>
#include "backend/kernel_compiler/akg/akg_kernel_json_generator.h" #include "backend/kernel_compiler/akg/akg_kernel_json_generator.h"
#include "ir/anf.h"
#include "ir/func_graph.h"
#include "ir/meta_tensor.h"
#include "ir/manager.h"
#include "ir/dtype.h"
#include "frontend/operator/ops.h"
#include "utils/convert_utils.h"
#include "utils/convert_utils_py.h"
#include "utils/utils.h"
#include "ir/graph_utils.h"
#include "runtime/device/kernel_info.h"
#include "pipeline/jit/parse/data_converter.h"
#include "pipeline/jit/parse/python_adapter.h"
#include "backend/kernel_compiler/common_utils.h" #include "backend/kernel_compiler/common_utils.h"
#include "backend/session/anf_runtime_algorithm.h" #include "backend/session/anf_runtime_algorithm.h"
#include "debug/anf_ir_dump.h" #include "debug/anf_ir_dump.h"
#include "frontend/operator/ops.h"
#include "ir/anf.h"
#include "ir/dtype.h"
#include "ir/func_graph.h"
#include "ir/graph_utils.h"
#include "ir/manager.h"
#include "ir/meta_tensor.h"
#include "pipeline/jit/parse/data_converter.h"
#include "pipeline/jit/parse/python_adapter.h"
#include "runtime/device/kernel_info.h"
#include "utils/convert_utils.h"
#include "utils/convert_utils_py.h"
#include "utils/utils.h"
namespace mindspore { namespace mindspore {
namespace kernel { namespace kernel {
@ -183,8 +184,16 @@ class CNodeDecoder {
const auto &inputs = cnode_->inputs(); const auto &inputs = cnode_->inputs();
for (size_t index = 1; index < inputs.size(); ++index) { for (size_t index = 1; index < inputs.size(); ++index) {
auto node = AnfAlgo::VisitKernel(inputs[index], 0); auto node = AnfAlgo::VisitKernel(inputs[index], 0);
if ((node.first)->isa<Parameter>()) {
auto parameter = (node.first)->cast<ParameterPtr>();
bool is_weight = AnfAlgo::IsParameterWeight(parameter);
kernel_info->SetFeatureMapFlag(!is_weight);
if (!is_weight) {
feature_map_input_indexs.push_back(index - 1);
}
}
if (AnfAlgo::IsFeatureMapOutput(node.first)) { if (AnfAlgo::IsFeatureMapOutput(node.first)) {
feature_map_input_indexs.push_back(index); feature_map_input_indexs.push_back(index - 1);
} }
} }
if (AnfAlgo::GetCNodeName(cnode_) == prim::kPrimCast->name()) { if (AnfAlgo::GetCNodeName(cnode_) == prim::kPrimCast->name()) {

View File

@ -0,0 +1,62 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/optimizer/graph_kernel/graph_kernel_cse.h"
#include <memory>
#include "backend/session/anf_runtime_algorithm.h"
#include "runtime/device/kernel_info.h"
namespace mindspore {
namespace opt {
// Compare the selected kernel build info of two CSE candidates.
// Nodes are considered equal when both lack kernel info, when both lack
// selected build info, or when fusion type and processor match and the
// build infos are similar (IsSimilarityKernelBuildInfo).
bool GraphKernelBackendCSE::CheckEqualKernelBuildInfo(const AnfNodePtr &main, const AnfNodePtr &node) const {
  MS_EXCEPTION_IF_NULL(main);
  MS_EXCEPTION_IF_NULL(node);
  auto lhs_info = dynamic_cast<device::KernelInfo *>(main->kernel_info());
  auto rhs_info = dynamic_cast<device::KernelInfo *>(node->kernel_info());
  // Exactly one side carries kernel info -> not equal.
  if ((lhs_info == nullptr) != (rhs_info == nullptr)) {
    return false;
  }
  // Neither side carries kernel info -> treat as equal.
  if (lhs_info == nullptr) {
    return true;
  }
  auto lhs_build = lhs_info->GetMutableSelectKernelBuildInfo();
  auto rhs_build = rhs_info->GetMutableSelectKernelBuildInfo();
  // Same pairwise logic for the selected build info.
  if ((lhs_build == nullptr) != (rhs_build == nullptr)) {
    return false;
  }
  if (lhs_build == nullptr) {
    return true;
  }
  if (lhs_build->fusion_type() != rhs_build->fusion_type()) {
    return false;
  }
  if (lhs_build->processor() != rhs_build->processor()) {
    return false;
  }
  return lhs_build->IsSimilarityKernelBuildInfo(*rhs_build);
}
bool GraphKernelCSE::Run(const FuncGraphPtr &func_graph) {
MS_EXCEPTION_IF_NULL(func_graph);
auto graphkernel_backend_cse = std::make_shared<GraphKernelBackendCSE>();
return graphkernel_backend_cse->Cse(func_graph, func_graph->manager());
}
} // namespace opt
} // namespace mindspore

View File

@ -0,0 +1,38 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_CSE_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_CSE_H_
#include "backend/optimizer/pass/common_subexpression_elimination.h"
namespace mindspore {
namespace opt {
// Optimizer pass that performs common subexpression elimination for the
// GraphKernel pipeline; delegates the work to GraphKernelBackendCSE.
class GraphKernelCSE : public Pass {
 public:
  GraphKernelCSE() : Pass("graph_kernel_cse") {}
  ~GraphKernelCSE() override = default;
  // Returns true when at least one subexpression was eliminated
  // (i.e. the graph changed) — TODO confirm against BackendCSE::Cse.
  bool Run(const FuncGraphPtr &func_graph) override;
};
// BackendCSE specialization for graph kernels: overrides the kernel
// build-info comparison so that nodes with similar (not strictly equal)
// selected build info can still be merged.
class GraphKernelBackendCSE : public BackendCSE {
 public:
  GraphKernelBackendCSE() = default;
  ~GraphKernelBackendCSE() override = default;
  // True when both nodes have equivalent (or equally absent) selected
  // kernel build info.
  bool CheckEqualKernelBuildInfo(const AnfNodePtr &main, const AnfNodePtr &node) const override;
};
} // namespace opt
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_CSE_H_

View File

@ -16,19 +16,20 @@
#include "backend/optimizer/graph_kernel/graph_kernel_expander.h" #include "backend/optimizer/graph_kernel/graph_kernel_expander.h"
#include <vector>
#include <string> #include <string>
#include <unordered_set> #include <unordered_set>
#include <vector>
#include "backend/session/anf_runtime_algorithm.h"
#include "pipeline/jit/parse/python_adapter.h"
#include "mindspore/core/ir/graph_utils.h"
#include "backend/optimizer/graph_kernel/graph_kernel_helper.h"
#include "backend/kernel_compiler/akg/akg_kernel_json_generator.h" #include "backend/kernel_compiler/akg/akg_kernel_json_generator.h"
#include "vm/segment_runner.h"
#include "runtime/device/kernel_info.h"
#include "backend/kernel_compiler/common_utils.h" #include "backend/kernel_compiler/common_utils.h"
#include "backend/kernel_compiler/kernel_build_info.h" #include "backend/kernel_compiler/kernel_build_info.h"
#include "backend/optimizer/graph_kernel/graph_kernel_helper.h"
#include "backend/session/anf_runtime_algorithm.h"
#include "mindspore/core/ir/graph_utils.h"
#include "pipeline/jit/parse/python_adapter.h"
#include "pybind_api/ir/primitive_py.h"
#include "runtime/device/kernel_info.h"
#include "vm/segment_runner.h"
namespace mindspore { namespace mindspore {
namespace opt { namespace opt {
@ -187,11 +188,33 @@ bool GraphKernelExpander::DoExpand(const FuncGraphPtr &func_graph) {
// replace origin node. // replace origin node.
(void)mng->Replace(node, graph_kernel_node); (void)mng->Replace(node, graph_kernel_node);
ToPrimitive(AnfAlgo::GetCNodeFuncGraphPtr(graph_kernel_node));
changed = true; changed = true;
} }
return changed; return changed;
} }
// Replace every PrimitivePy-backed primitive in the expanded graph with a
// plain C++ Primitive copy, detaching the nodes from the Python layer.
// Nodes are visited in reverse topological order.
void GraphKernelExpander::ToPrimitive(const FuncGraphPtr &func_graph) const {
  auto sorted_nodes = TopoSort(func_graph->get_return());
  auto mng = func_graph->manager();
  MS_EXCEPTION_IF_NULL(mng);
  // Walk the topological order back-to-front via reverse iterators.
  for (auto it = sorted_nodes.rbegin(); it != sorted_nodes.rend(); ++it) {
    auto cnode = (*it)->cast<CNodePtr>();
    if (cnode == nullptr) {
      continue;
    }
    auto prim = AnfAlgo::GetCNodePrimitive(cnode);
    MS_EXCEPTION_IF_NULL(prim);
    if (!prim->isa<PrimitivePy>()) {
      continue;
    }
    // Slice the PrimitivePy down to its Primitive base and install it as
    // the new operator input.
    cnode->set_input(0, std::make_shared<ValueNode>(std::make_shared<Primitive>(*prim)));
  }
}
bool GraphKernelExpander::Run(const FuncGraphPtr &func_graph) { bool GraphKernelExpander::Run(const FuncGraphPtr &func_graph) {
expand_ops_ = GetExpandOps(); expand_ops_ = GetExpandOps();
MS_EXCEPTION_IF_NULL(func_graph); MS_EXCEPTION_IF_NULL(func_graph);

View File

@ -17,8 +17,8 @@
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_GRAPH_KERNEL_EXPANDER_H_ #define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_GRAPH_KERNEL_EXPANDER_H_
#include <memory> #include <memory>
#include <unordered_set> #include <unordered_set>
#include "ir/func_graph.h"
#include "backend/optimizer/common/pass.h" #include "backend/optimizer/common/pass.h"
#include "ir/func_graph.h"
namespace mindspore { namespace mindspore {
namespace opt { namespace opt {
@ -31,6 +31,7 @@ class GraphKernelExpander : public Pass {
private: private:
FuncGraphPtr CreateExpandFuncGraph(const CNodePtr &node); FuncGraphPtr CreateExpandFuncGraph(const CNodePtr &node);
bool DoExpand(const FuncGraphPtr &func_graph); bool DoExpand(const FuncGraphPtr &func_graph);
void ToPrimitive(const FuncGraphPtr &func_graph) const;
AnfNodePtr CreateExpandGraphKernel(const FuncGraphPtr &func_graph, const FuncGraphPtr &new_func_graph, AnfNodePtr CreateExpandGraphKernel(const FuncGraphPtr &func_graph, const FuncGraphPtr &new_func_graph,
const CNodePtr &node); const CNodePtr &node);
bool CanExpand(const CNodePtr &node) { bool CanExpand(const CNodePtr &node) {

View File

@ -14,15 +14,27 @@
* limitations under the License. * limitations under the License.
*/ */
#include "backend/optimizer/pass/common_subexpression_elimination.h" #include "backend/optimizer/pass/common_subexpression_elimination.h"
#include <memory> #include <memory>
#include "runtime/device/kernel_info.h"
#include "backend/session/anf_runtime_algorithm.h" #include "backend/session/anf_runtime_algorithm.h"
#include "runtime/device/kernel_info.h"
#include "utils/flags.h" #include "utils/flags.h"
namespace mindspore { namespace mindspore {
namespace opt { namespace opt {
namespace { namespace {
bool CheckEqualKernelBuildInfo(const AnfNodePtr &main, const AnfNodePtr &node) { bool HasSideEffectAttr(const AnfNodePtr &node) {
MS_EXCEPTION_IF_NULL(node);
auto cnode = node->cast<CNodePtr>();
MS_EXCEPTION_IF_NULL(cnode);
if (!AnfAlgo::HasNodeAttr(GRAPH_FLAG_SIDE_EFFECT, cnode)) {
return false;
}
return AnfAlgo::GetNodeAttr<bool>(cnode, GRAPH_FLAG_SIDE_EFFECT);
}
} // namespace
bool BackendCSE::CheckEqualKernelBuildInfo(const AnfNodePtr &main, const AnfNodePtr &node) const {
MS_EXCEPTION_IF_NULL(main); MS_EXCEPTION_IF_NULL(main);
MS_EXCEPTION_IF_NULL(node); MS_EXCEPTION_IF_NULL(node);
auto main_kernel_info = dynamic_cast<device::KernelInfo *>(main->kernel_info()); auto main_kernel_info = dynamic_cast<device::KernelInfo *>(main->kernel_info());
@ -36,17 +48,6 @@ bool CheckEqualKernelBuildInfo(const AnfNodePtr &main, const AnfNodePtr &node) {
return false; return false;
} }
bool HasSideEffectAttr(const AnfNodePtr &node) {
MS_EXCEPTION_IF_NULL(node);
auto cnode = node->cast<CNodePtr>();
MS_EXCEPTION_IF_NULL(cnode);
if (!AnfAlgo::HasNodeAttr(GRAPH_FLAG_SIDE_EFFECT, cnode)) {
return false;
}
return AnfAlgo::GetNodeAttr<bool>(cnode, GRAPH_FLAG_SIDE_EFFECT);
}
} // namespace
bool BackendCSE::CheckReplace(const AnfNodePtr &main, const AnfNodePtr &node, bool check_side_effect) const { bool BackendCSE::CheckReplace(const AnfNodePtr &main, const AnfNodePtr &node, bool check_side_effect) const {
MS_EXCEPTION_IF_NULL(main); MS_EXCEPTION_IF_NULL(main);
MS_EXCEPTION_IF_NULL(node); MS_EXCEPTION_IF_NULL(node);

View File

@ -32,6 +32,7 @@ class BackendCSE : public CSE {
BackendCSE() = default; BackendCSE() = default;
~BackendCSE() override = default; ~BackendCSE() override = default;
bool CheckReplace(const AnfNodePtr &main, const AnfNodePtr &node, bool check_side_effect = true) const override; bool CheckReplace(const AnfNodePtr &main, const AnfNodePtr &node, bool check_side_effect = true) const override;
virtual bool CheckEqualKernelBuildInfo(const AnfNodePtr &main, const AnfNodePtr &node) const;
}; };
} // namespace opt } // namespace opt
} // namespace mindspore } // namespace mindspore

View File

@ -15,15 +15,9 @@
*/ */
#include "backend/session/gpu_session.h" #include "backend/session/gpu_session.h"
#include "runtime/device/gpu/kernel_info_setter.h" #include "backend/optimizer/common/helper.h"
#include "runtime/device/gpu/gpu_kernel_build.h"
#include "runtime/device/gpu/gpu_kernel_runtime.h"
#include "runtime/device/gpu/gpu_stream_assign.h"
#include "backend/optimizer/common/optimizer.h" #include "backend/optimizer/common/optimizer.h"
#include "backend/optimizer/common/pass_manager.h" #include "backend/optimizer/common/pass_manager.h"
#include "backend/optimizer/common/helper.h"
#include "backend/optimizer/pass/communication_op_fusion.h"
#include "backend/optimizer/pass/getitem_tuple.h"
#include "backend/optimizer/gpu/adam_weight_decay_fusion.h" #include "backend/optimizer/gpu/adam_weight_decay_fusion.h"
#include "backend/optimizer/gpu/adam_fusion.h" #include "backend/optimizer/gpu/adam_fusion.h"
#include "backend/optimizer/gpu/apply_momentum_weight_scale_fusion.h" #include "backend/optimizer/gpu/apply_momentum_weight_scale_fusion.h"
@ -32,27 +26,34 @@
#include "backend/optimizer/gpu/batch_norm_relu_grad_fusion.h" #include "backend/optimizer/gpu/batch_norm_relu_grad_fusion.h"
#include "backend/optimizer/gpu/batch_norm_add_relu_fusion.h" #include "backend/optimizer/gpu/batch_norm_add_relu_fusion.h"
#include "backend/optimizer/gpu/batch_norm_add_relu_grad_fusion.h" #include "backend/optimizer/gpu/batch_norm_add_relu_grad_fusion.h"
#include "backend/optimizer/gpu/cudnn_inplace_fusion.h"
#include "backend/optimizer/gpu/insert_format_transform_op.h"
#include "backend/optimizer/gpu/replace_momentum_cast_fusion.h" #include "backend/optimizer/gpu/replace_momentum_cast_fusion.h"
#include "backend/optimizer/gpu/replace_addn_fusion.h" #include "backend/optimizer/gpu/replace_addn_fusion.h"
#include "backend/optimizer/gpu/insert_format_transform_op.h"
#include "backend/optimizer/gpu/remove_format_transform_pair.h" #include "backend/optimizer/gpu/remove_format_transform_pair.h"
#include "backend/optimizer/gpu/remove_redundant_format_transform.h" #include "backend/optimizer/gpu/remove_redundant_format_transform.h"
#include "backend/optimizer/gpu/cudnn_inplace_fusion.h"
#include "backend/optimizer/gpu/reduce_precision_fusion.h" #include "backend/optimizer/gpu/reduce_precision_fusion.h"
#include "backend/optimizer/graph_kernel/value_graph_binder.h" #include "backend/optimizer/graph_kernel/arithmetic_simplify.h"
#include "backend/optimizer/graph_kernel/graph_kernel_splitter.h"
#include "backend/optimizer/graph_kernel/graph_kernel_expander.h"
#include "backend/optimizer/graph_kernel/basic_ops_fusion.h" #include "backend/optimizer/graph_kernel/basic_ops_fusion.h"
#include "backend/optimizer/graph_kernel/composite_ops_fusion.h" #include "backend/optimizer/graph_kernel/composite_ops_fusion.h"
#include "backend/optimizer/graph_kernel/arithmetic_simplify.h" #include "backend/optimizer/graph_kernel/graph_kernel_splitter.h"
#include "runtime/device/kernel_runtime_manager.h" #include "backend/optimizer/graph_kernel/graph_kernel_expander.h"
#include "utils/ms_utils.h" #include "backend/optimizer/graph_kernel/graph_kernel_cse.h"
#include "utils/config_manager.h" #include "backend/optimizer/graph_kernel/value_graph_binder.h"
#include "backend/optimizer/pass/communication_op_fusion.h"
#include "backend/optimizer/pass/getitem_tuple.h"
#include "common/trans.h" #include "common/trans.h"
#include "utils/ms_context.h"
#include "debug/data_dump/e2e_dump_util.h" #include "debug/data_dump/e2e_dump_util.h"
#include "debug/tensor_load.h" #include "debug/tensor_load.h"
#include "debug/dump_proto.h" #include "debug/dump_proto.h"
#include "runtime/device/gpu/gpu_kernel_build.h"
#include "runtime/device/gpu/gpu_kernel_runtime.h"
#include "runtime/device/gpu/gpu_stream_assign.h"
#include "runtime/device/gpu/kernel_info_setter.h"
#include "runtime/device/kernel_runtime_manager.h"
#include "utils/ms_utils.h"
#include "utils/config_manager.h"
#include "utils/ms_context.h"
namespace mindspore { namespace mindspore {
namespace session { namespace session {
@ -119,7 +120,9 @@ void GPUSession::GraphKernelOptimize(const std::shared_ptr<KernelGraph> &kernel_
pm->AddPass(std::make_shared<opt::GraphKernelExpander>()); pm->AddPass(std::make_shared<opt::GraphKernelExpander>());
pm->AddPass(std::make_shared<opt::BasicOpsFusion>()); pm->AddPass(std::make_shared<opt::BasicOpsFusion>());
pm->AddPass(std::make_shared<opt::CompositeOpsFusion>()); pm->AddPass(std::make_shared<opt::CompositeOpsFusion>());
pm->AddPass(std::make_shared<opt::GraphKernelCSE>());
pm->AddPass(std::make_shared<opt::ArithmeticSimplify>()); pm->AddPass(std::make_shared<opt::ArithmeticSimplify>());
pm->AddPass(std::make_shared<opt::GraphKernelCSE>());
pm->AddPass(std::make_shared<opt::GraphKernelSplitter>()); pm->AddPass(std::make_shared<opt::GraphKernelSplitter>());
// After Simplify and Splitter, a lot of redundant getitem/maketuple // After Simplify and Splitter, a lot of redundant getitem/maketuple
// will be exposed, use GetitemTuple Pass to delete them. // will be exposed, use GetitemTuple Pass to delete them.

View File

@ -0,0 +1,51 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
from mindspore import Tensor
from mindspore.nn import Cell
import mindspore.ops.operations as P
# Run in graph mode on GPU with graph kernel fusion enabled, so the
# GraphKernel CSE pass under test is actually exercised.
context.set_context(mode=context.GRAPH_MODE, enable_graph_kernel=True, device_target="GPU")
class Net(Cell):
    """Computes x*x + Square(x); mul and Square are common subexpressions
    that the graph kernel CSE pass should be able to merge."""

    def __init__(self):
        super(Net, self).__init__()
        self.add = P.TensorAdd()
        self.mul = P.Mul()

    def construct(self, x):
        product = self.mul(x, x)
        squared = P.Square()(x)
        return self.add(product, squared)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_basic():
    """Net output (x*x + square(x)) must match the numpy reference."""
    data = np.random.normal(0, 1, [2, 3, 4, 3]).astype(np.float32)
    # Reference computed element-wise with numpy.
    expect = data * data + np.square(data)
    output = Net()(Tensor(data))
    assert np.allclose(expect, output.asnumpy(), rtol=1.e-4, atol=1.e-7, equal_nan=True)