!24114 Fix code review warnings

Merge pull request !24114 from zyli2020/code_refactor
i-robot 2021-09-26 08:36:39 +00:00 committed by Gitee
commit adfb65aaac
4 changed files with 34 additions and 5 deletions

View File

@@ -261,6 +261,7 @@ void UpdateDeviceAddressForInplaceNode(const KernelGraphPtr &graph) {
}
void SetSummaryNodesRefCount(const KernelGraph *graph) {
+ MS_EXCEPTION_IF_NULL(graph);
if (!graph->summary_node_exist()) {
return;
}
@@ -315,6 +316,7 @@ GraphId GraphCompiler::CompileGraph(const AnfNodePtrList &nodes, const AnfNodePt
GraphId GraphCompiler::CompileGraph(const FuncGraphPtr &func_graph, const DeviceContext *device_context) {
MS_EXCEPTION_IF_NULL(session_);
+ MS_EXCEPTION_IF_NULL(func_graph);
// Generate kernel graph.
std::vector<KernelGraphPtr> all_graphs;
KernelGraphPtr root_graph = session_->ConstructKernelGraph(func_graph, &all_graphs);
@@ -507,6 +509,7 @@ TensorPtr GraphCompiler::GetSingleOpInputTensorByIndex(const CNodePtr &kernel,
void GraphCompiler::GetSingleOpRunInfoAndGraphInfo(const CNodePtr &kernel, const std::vector<TensorPtr> &input_tensors,
OpRunInfo *const run_info, GraphInfo *const graph_info) {
MS_EXCEPTION_IF_NULL(session_);
+ MS_EXCEPTION_IF_NULL(graph_info);
session_->GetSingleOpRunInfo(kernel, run_info);
*graph_info = session_->GetSingleOpGraphInfo(kernel, input_tensors);
}
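
Every hunk in this file follows the same pattern: validate a pointer with MS_EXCEPTION_IF_NULL before it is dereferenced, so a null argument fails fast with a traceable exception instead of crashing later. A rough illustration of how such a guard macro can work (a minimal sketch, not MindSpore's actual definition):

#include <sstream>
#include <stdexcept>

// Hypothetical stand-in for MS_EXCEPTION_IF_NULL: throw a descriptive
// exception as soon as a null pointer is observed.
#define EXCEPTION_IF_NULL(ptr)                                          \
  do {                                                                  \
    if ((ptr) == nullptr) {                                             \
      std::ostringstream oss;                                           \
      oss << "The pointer [" #ptr "] is null at " << __FILE__ << ":"    \
          << __LINE__;                                                  \
      throw std::runtime_error(oss.str());                              \
    }                                                                   \
  } while (false)

void CompileGraph(const int *graph) {
  EXCEPTION_IF_NULL(graph);  // fail fast, before any dereference
  // ... it is now safe to use *graph ...
}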

View File

@@ -114,6 +114,7 @@ void CPUDeviceContext::OptimizeSingleOpGraph(const KernelGraphPtr &graph) const
}
void CPUDeviceContext::OptimizeGraphImpl(const KernelGraphPtr &graph) const {
+ MS_EXCEPTION_IF_NULL(graph);
auto optimizer = std::make_shared<opt::GraphOptimizer>();
auto pm = std::make_shared<opt::PassManager>();
pm->AddPass(std::make_shared<opt::InsertFormatTransformOpCPU>("insert_format_transform_op_cpu"));
@@ -190,6 +191,7 @@ void CPUDeviceContext::CreateKernel(const std::vector<CNodePtr> &nodes) const {
namespace {
void ProcessCast(const KernelGraphPtr &graph) {
+ MS_EXCEPTION_IF_NULL(graph);
auto optimizer = std::make_shared<opt::GraphOptimizer>();
auto pm = std::make_shared<opt::PassManager>();
pm->AddPass(std::make_shared<opt::InsertCastCPU>("insert_cast_cpu"));
@@ -202,6 +204,7 @@ void ProcessCast(const KernelGraphPtr &graph) {
} // namespace
void CPUDeviceContext::PreprocessBeforeRunGraph(const KernelGraphPtr &graph) const {
+ MS_EXCEPTION_IF_NULL(graph);
ProcessCast(graph);
// Remove reorder after PS feature finish adapting push/pull in auto_monad.
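
Both CPU hunks guard functions built on the registration idiom visible above: construct a GraphOptimizer and a PassManager, add passes, then run them over the kernel graph. A self-contained sketch of that idiom (simplified stand-in types, not MindSpore's real opt:: interfaces):

#include <functional>
#include <utility>
#include <vector>

struct Graph {};  // stand-in for the kernel graph
using Pass = std::function<void(Graph *)>;

// Runs registered passes in order, mirroring the AddPass calls above.
class PassManager {
 public:
  void AddPass(Pass pass) { passes_.push_back(std::move(pass)); }
  void Run(Graph *graph) const {
    for (const auto &pass : passes_) pass(graph);
  }

 private:
  std::vector<Pass> passes_;
};

void OptimizeGraphImpl(Graph *graph) {
  if (graph == nullptr) return;  // the guard added by this PR throws instead
  PassManager pm;
  pm.AddPass([](Graph *) { /* e.g. insert format-transform ops */ });
  pm.AddPass([](Graph *) { /* e.g. insert cast ops */ });
  pm.Run(graph);
}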

View File

@@ -235,6 +235,7 @@ void GPUDeviceContext::OptimizeGraphWithoutDeviceInfo(const KernelGraphPtr &grap
}
void GPUDeviceContext::OptimizeGraphWithDeviceInfo(const KernelGraphPtr &graph) const {
+ MS_EXCEPTION_IF_NULL(graph);
auto ms_context = MsContext::GetInstance();
MS_EXCEPTION_IF_NULL(ms_context);
// Graph optimization relevant to device data format
@@ -267,6 +268,7 @@ void GPUDeviceContext::OptimizeGraphWithDeviceInfo(const KernelGraphPtr &graph)
}
void GPUDeviceContext::FuseOperators(const KernelGraphPtr &graph) const {
+ MS_EXCEPTION_IF_NULL(graph);
auto optimizer = std::make_shared<opt::GraphOptimizer>();
auto pm = std::make_shared<opt::PassManager>();
pm->AddPass(std::make_shared<opt::MatMulBiasAddFusion>());
@@ -291,6 +293,7 @@ void GPUDeviceContext::FuseOperators(const KernelGraphPtr &graph) const {
void GPUDeviceContext::UpdateGraphDynamicShapeAttr(const NotNull<KernelGraphPtr> &graph) const {
for (const auto &cnode : graph->execution_order()) {
+ MS_EXCEPTION_IF_NULL(cnode);
if (AnfAlgo::IsNodeDynamicShape(cnode)) {
AnfAlgo::SetNodeAttr(kAttrIsDynamicShape, MakeValue(true), cnode);
MS_LOG(INFO) << "Set Dynamic Shape Attr to Node:" << cnode->fullname_with_scope();
@@ -312,6 +315,7 @@ void RunOpOptimize(const KernelGraphPtr &kernel_graph) {
}
void RunOpHardwareOptimize(const KernelGraphPtr &kernel_graph) {
+ MS_EXCEPTION_IF_NULL(kernel_graph);
auto optimizer = std::make_shared<opt::GraphOptimizer>();
auto pm = std::make_shared<opt::PassManager>();
pm->AddPass(std::make_shared<opt::ReducePrecisionFusion>("reduce_precision"));
@@ -501,6 +505,11 @@ uint32_t GPUDeviceContext::GetRankID() const {
std::shared_ptr<Bucket> GPUDeviceContext::CreateBucket(uint32_t bucket_id, uint32_t bucket_size) const {
auto bucket = std::make_shared<GPUBucket>(bucket_id, bucket_size);
MS_EXCEPTION_IF_NULL(bucket);
+ // One computation stream, one communication stream.
+ const size_t min_num_of_stream = 2;
+ if (min_num_of_stream > streams_.size()) {
+ MS_LOG(EXCEPTION) << "The total stream num: " << streams_.size() << " is less than: " << min_num_of_stream;
+ }
bucket->Init({streams_[0]}, {streams_[1]});
return bucket;
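
Beyond the null checks, this hunk validates streams_.size() before indexing streams_[0] and streams_[1]; operator[] past the end of a std::vector is undefined behavior, so the guard turns a silent out-of-bounds read into a diagnosable exception. The same check in standalone form (illustrative names, not the real GPU stream types):

#include <cstddef>
#include <stdexcept>
#include <string>
#include <utility>
#include <vector>

// Fetch the computation and communication streams, failing loudly
// if fewer than two streams exist.
std::pair<void *, void *> FirstTwoStreams(const std::vector<void *> &streams) {
  const std::size_t min_num_of_stream = 2;  // one computation, one communication
  if (streams.size() < min_num_of_stream) {
    throw std::out_of_range("The total stream num: " + std::to_string(streams.size()) +
                            " is less than: " + std::to_string(min_num_of_stream));
  }
  return {streams[0], streams[1]};
}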

View File

@@ -417,6 +417,7 @@ bool MindRTBackend::CompileGraph(const FuncGraphPtr &func_graph) {
MS_LOG(INFO) << "Compile graph: " << func_graph->ToString() << ", Split segments size:" << segments.size();
const auto &device_context =
device::DeviceContextManager::GetInstance().GetOrCreateDeviceContext({device_name_, device_id_});
+ MS_EXCEPTION_IF_NULL(device_context);
const auto &new_segments = device_context->PartitionGraph(func_graph, segments);
// Compile the whole function graph if not split graph.
@@ -447,6 +448,7 @@ void MindRTBackend::CompileGraph(const GraphSegmentPtr &segment, bool contain_mu
const auto &cur_device_name = GetCNodeTarget(segment->nodes_[0]);
const auto &device_context =
device::DeviceContextManager::GetInstance().GetOrCreateDeviceContext({cur_device_name, device_id_});
+ MS_EXCEPTION_IF_NULL(device_context);
device_context->Initialize();
// Transform nodes to inputs and outputs.
@@ -487,6 +489,7 @@ const ActorInfo &MindRTBackend::CompileGraph(const OpRunInfo &op_run_info, const
// Get the device context.
const auto &device_context =
device::DeviceContextManager::GetInstance().GetOrCreateDeviceContext({device_name_, device_id_});
+ MS_EXCEPTION_IF_NULL(device_context);
device_context->Initialize();
bool single_op_cache_hit = true;
@@ -527,6 +530,7 @@ void GetControlOpInput(const std::shared_ptr<GraphCompiler> &graph_compiler, con
MS_EXCEPTION_IF_NULL(front_cnode);
MS_EXCEPTION_IF_NULL(backend_cnode);
MS_EXCEPTION_IF_NULL(graph_compiler);
+ MS_EXCEPTION_IF_NULL(args);
size_t input_index = 0;
auto inputs = front_cnode->inputs();
for (size_t i = 1; i < inputs.size(); i++) {
@@ -596,12 +600,11 @@ void ConvertMultiPyObjectToTensor(const py::object &input_object, std::vector<te
MS_LOG(EXCEPTION) << "The input should be a tuple!";
}
- auto tuple_inputs = py::cast<py::tuple>(input_object);
- if (tuple_inputs.empty()) {
+ auto inputs = py::cast<py::tuple>(input_object);
+ if (inputs.empty()) {
MS_LOG(EXCEPTION) << "The size of input list or tuple is 0!";
}
- auto inputs = py::cast<py::tuple>(input_object);
if (py::isinstance<tensor::Tensor>(inputs[0])) {
PlantTensorTupleToVector(inputs, tensors);
} else {
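
Besides renaming tuple_inputs to inputs, this hunk drops a redundant second py::cast<py::tuple>: the object was cast once for the emptiness check and again for element access. The new code casts once and reuses the handle; roughly, with pybind11 (assuming the py::cast/py::isinstance usage shown above):

#include <stdexcept>
#include <pybind11/pybind11.h>
namespace py = pybind11;

void ConvertMultiPyObjectToTensor(const py::object &input_object) {
  if (!py::isinstance<py::tuple>(input_object)) {
    throw std::runtime_error("The input should be a tuple!");
  }
  // One cast, reused for both the emptiness check and element access.
  auto inputs = py::cast<py::tuple>(input_object);
  if (inputs.empty()) {
    throw std::runtime_error("The size of input list or tuple is 0!");
  }
  // ... convert inputs[0], inputs[1], ... to tensors ...
}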
@@ -615,12 +618,15 @@ void RunControlOperator(const std::shared_ptr<GraphCompiler> &graph_compiler, co
const std::vector<tensor::TensorPtr> &graph_inputs, InputTensorInfo *input_tensor_info,
VectorRef *op_outputs) {
MS_EXCEPTION_IF_NULL(graph);
+ MS_EXCEPTION_IF_NULL(kernel);
+ MS_EXCEPTION_IF_NULL(op_outputs);
AnfNodePtr front_node = graph->GetFrontAnfByBackendAnf(kernel);
MS_EXCEPTION_IF_NULL(front_node);
if (!front_node->isa<CNode>()) {
MS_LOG(EXCEPTION) << "The front node of bprop_cut is not CNode";
}
CNodePtr cnode = front_node->cast<CNodePtr>();
+ MS_EXCEPTION_IF_NULL(cnode);
const std::vector<AnfNodePtr> &node_inputs = cnode->inputs();
if (node_inputs.empty()) {
MS_LOG(EXCEPTION) << "The inputs of node[" << cnode->fullname_with_scope() << "] is empty";
@@ -633,6 +639,7 @@ void RunControlOperator(const std::shared_ptr<GraphCompiler> &graph_compiler, co
}
PrimitivePtr prim = GetValueNode<PrimitivePtr>(fn);
+ MS_EXCEPTION_IF_NULL(prim);
if (prim->name() == kBpropCutOpName) {
VectorRef args;
GetControlOpInput(graph_compiler, cnode, kernel, op_output_map, parameter_index, graph_inputs, input_tensor_info,
@@ -798,7 +805,8 @@ void MindRTBackend::RunGraph(const ActorInfo &actor_info, const VectorRef &args,
if (graph_iter == actor_to_graph_compiler_info_.end()) {
MS_LOG(EXCEPTION) << "Can't find the graph compiler info.";
}
- const auto &graph_compiler_info = *(graph_iter->second.get());
+ MS_EXCEPTION_IF_NULL(graph_iter->second);
+ const auto &graph_compiler_info = *(graph_iter->second);
const auto &origin_parameters = graph_compiler_info.origin_parameters_order_;
// Transform args to input tensors.
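
The change above swaps *(graph_iter->second.get()) for a null check plus a plain dereference. For a smart pointer, *ptr and *(ptr.get()) are equivalent, and both are undefined behavior when the stored pointer is null, so the check must come first. A standalone version of the lookup (hypothetical types):

#include <map>
#include <memory>
#include <stdexcept>
#include <string>

struct GraphCompilerInfo { /* compiled-graph metadata */ };
using InfoMap = std::map<std::string, std::unique_ptr<GraphCompilerInfo>>;

const GraphCompilerInfo &FetchCompilerInfo(const InfoMap &infos, const std::string &actor_info) {
  auto iter = infos.find(actor_info);
  if (iter == infos.end()) {
    throw std::runtime_error("Can't find the graph compiler info.");
  }
  if (iter->second == nullptr) {  // the map can legally hold an empty unique_ptr
    throw std::runtime_error("The graph compiler info is null.");
  }
  return *(iter->second);  // safe: both lookup and pointer were checked
}
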
@@ -842,6 +850,9 @@ void MindRTBackend::RunGraph(const ActorInfo &actor_info, const VectorRef &args,
MS_LOG(EXCEPTION) << "The actor runs failed, actor name: " << actor_set->name_;
}
+ if (graph_compiler_info.device_contexts_.empty()) {
+ MS_LOG(EXCEPTION) << "The device contexts is empty.";
+ }
// Sync device stream.
const auto &first_device_context = graph_compiler_info.device_contexts_[0];
MS_EXCEPTION_IF_NULL(first_device_context);
@@ -877,6 +888,7 @@ void MindRTBackend::ConstructOutputs(const AnfNodePtr &output_node,
VectorRef *outputs) {
MS_EXCEPTION_IF_NULL(output_node);
MS_EXCEPTION_IF_NULL(outputs);
+ MS_EXCEPTION_IF_NULL(output_position);
// The makeTuple node need expand and recurse.
if (AnfAlgo::CheckPrimitiveType(output_node, prim::kPrimMakeTuple)) {
auto make_tuple = output_node->cast<CNodePtr>();
@@ -994,7 +1006,6 @@ std::unique_ptr<GraphCompilerInfo> MindRTBackend::ConstructGraphCompilerInfo(con
std::unique_ptr<GraphCompilerInfo> MindRTBackend::ConstructGraphCompilerInfo(
const ActorInfo &actor_info, const std::vector<int64_t> *tensors_mask,
const std::vector<tensor::TensorPtr> *input_tensors, bool need_erase) {
- MS_EXCEPTION_IF_NULL(graph_compiler_);
std::vector<KernelGraphPtr> graphs;
std::vector<DeviceContext *> device_contexts;
runtime::KernelMapPosition outputs_order;
@@ -1027,10 +1038,12 @@ std::unique_ptr<GraphCompilerInfo> MindRTBackend::ConstructGraphCompilerInfo(
}
void MindRTBackend::EraseSingleOpCache(const ActorInfo &actor_info, const KernelGraphPtr &graph) {
+ MS_EXCEPTION_IF_NULL(graph);
if (graph_info_to_device_context_.empty()) {
MS_LOG(EXCEPTION) << "The map graph_info_to_device_context_ is empty.";
}
const auto &graph_info = graph_info_to_device_context_.begin()->first;
+ MS_EXCEPTION_IF_NULL(graph_compiler_);
graph_compiler_->EraseSingleOpCache(graph_info, graph->graph_id());
actor_to_graph_compiler_info_.erase(actor_info);
}
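
EraseSingleOpCache now checks graph_info_to_device_context_.empty() before calling begin()->first, and re-validates graph_compiler_ at its point of use rather than inside ConstructGraphCompilerInfo (see the deletion above). Dereferencing begin() of an empty container is undefined behavior, just like the unchecked stream indexing fixed earlier. In isolation (illustrative map type):

#include <map>
#include <stdexcept>
#include <string>

std::string FirstGraphInfo(const std::map<std::string, int> &graph_info_to_device_context) {
  if (graph_info_to_device_context.empty()) {
    throw std::runtime_error("The map graph_info_to_device_context_ is empty.");
  }
  // Safe: begin() is dereferenceable only because the map is non-empty.
  return graph_info_to_device_context.begin()->first;
}
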
@@ -1045,6 +1058,7 @@ void MindRTBackend::RunGraph(const ActorInfo &actor_info, OpRunInfo *op_run_info
if (graph_iter == actor_to_graph_compiler_info_.end()) {
MS_LOG(EXCEPTION) << "Can't find the graph compiler info.";
}
+ MS_EXCEPTION_IF_NULL(graph_iter->second);
const auto &graph_compiler_info = *(graph_iter->second);
const auto &actor_set = runtime::GraphScheduler::GetInstance().Fetch(actor_info);