self code check

zhaosida 2021-09-27 11:12:55 +08:00
parent 13a48747a8
commit 85e00ba3ab
27 changed files with 90 additions and 27 deletions

View File

@@ -50,6 +50,7 @@ void AicpuMetadataInfo(const CNodePtr &kernel_node, std::vector<std::shared_ptr<
 void AicpuMetadataInfoForSpecialNodes(const CNodePtr &kernel_node,
                                       std::vector<std::shared_ptr<KernelBuildInfo>> *kernel_info_list) {
+  MS_EXCEPTION_IF_NULL(kernel_info_list);
   std::vector<std::string> inputs_format{};
   std::vector<TypeId> inputs_type{};
   auto op_name = AnfAlgo::GetCNodeName(kernel_node);
@@ -57,7 +58,7 @@ void AicpuMetadataInfoForSpecialNodes(const CNodePtr &kernel_node,
     size_t input_num = AnfAlgo::GetInputTensorNum(kernel_node);
     for (size_t input_index = 0; input_index < input_num; ++input_index) {
       inputs_format.emplace_back(kOpFormat_DEFAULT);
-      inputs_type.push_back(AnfAlgo::GetPrevNodeOutputInferDataType(kernel_node, input_index));
+      (void)inputs_type.emplace_back(AnfAlgo::GetPrevNodeOutputInferDataType(kernel_node, input_index));
     }
   }
   std::vector<std::string> outputs_format;
@@ -65,7 +66,7 @@ void AicpuMetadataInfoForSpecialNodes(const CNodePtr &kernel_node,
   size_t output_num = AnfAlgo::GetOutputTensorNum(kernel_node);
   for (size_t output_index = 0; output_index < output_num; ++output_index) {
     outputs_format.emplace_back(kOpFormat_DEFAULT);
-    outputs_type.push_back(AnfAlgo::GetOutputInferDataType(kernel_node, output_index));
+    (void)outputs_type.emplace_back(AnfAlgo::GetOutputInferDataType(kernel_node, output_index));
   }
   auto builder = KernelBuildInfo::KernelBuildInfoBuilder();
   builder.SetInputsFormat(inputs_format);
@@ -75,7 +76,7 @@ void AicpuMetadataInfoForSpecialNodes(const CNodePtr &kernel_node,
   builder.SetProcessor(AICPU);
   builder.SetKernelType(AICPU_KERNEL);
   builder.SetFusionType(OPAQUE);
-  kernel_info_list->push_back(builder.Build());
+  (void)kernel_info_list->emplace_back(builder.Build());
   return;
 }
 }  // namespace kernel
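The `(void)` casts in these hunks follow a discarded-return-value lint rule: since C++17, `emplace_back` returns a reference to the inserted element, and the explicit cast tells static checkers the result is ignored on purpose. A minimal standalone sketch of the idiom (the helper below is hypothetical, not MindSpore code):

#include <vector>

// Hypothetical helper illustrating the (void)-discard idiom.
// Since C++17, emplace_back returns a reference to the new element;
// casting to void marks the return value as intentionally unused.
void AppendDefaults(std::vector<int> *values, int count) {
  for (int i = 0; i < count; ++i) {
    (void)values->emplace_back(0);  // return value intentionally discarded
  }
}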

View File

@@ -57,19 +57,19 @@ std::vector<std::vector<int64_t>> GetGradientIndices(const std::vector<std::vect
     // All dimensions are 1.
     if (!output_dim_set) {
       for (int i = 0; i < kInputNum; ++i) {
-        grad_reduce_idx[i].push_back(largest_rank - 1 - j);
+        (void)grad_reduce_idx[i].emplace_back(largest_rank - 1 - j);
       }
       continue;
     } else if (std::equal(current_is_one, current_is_one + kInputNum, prev_is_one) && set_one) {
       for (int i = 0; i < kInputNum; ++i) {
         if (current_is_one[i] && !none_is_one) {
-          grad_reduce_idx[i].push_back(largest_rank - 1 - j);
+          (void)grad_reduce_idx[i].emplace_back(largest_rank - 1 - j);
         }
       }
     } else {
       for (int i = 0; i < kInputNum; ++i) {
         if (current_is_one[i] && !none_is_one) {
-          grad_reduce_idx[i].push_back(largest_rank - 1 - j);
+          (void)grad_reduce_idx[i].emplace_back(largest_rank - 1 - j);
         }
       }
     }
@@ -131,6 +131,7 @@ std::vector<int64_t> GetInputShape(const CNodePtr &cnode, size_t index) {
   auto x_shape_value = std::make_shared<tensor::Tensor>(type_x, x);
   // The second parameter must be false, otherwise the device address cannot be released and allocated, and the
   // address size will be wrong in the dynamic shape scenario.
+  MS_EXCEPTION_IF_NULL(x_shape_value);
   x_shape_value->set_device_address(address_x, false);
   x_shape_value->data_sync();
@@ -163,6 +164,7 @@ size_t SetOutputValue(const CNodePtr &cnode, const std::vector<std::vector<int64
   std::vector<int64_t> out_shape{SizeToLong(out_size)};
   auto output_type = TypeId::kNumberTypeInt64;
   auto tensor_for_sync = std::make_shared<tensor::Tensor>(output_type, out_shape);
+  MS_EXCEPTION_IF_NULL(tensor_for_sync);
   auto data_ptr = static_cast<int64_t *>(tensor_for_sync->data_c());
   for (size_t i = 0; i < out_size; ++i) {
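Several hunks in this commit add `MS_EXCEPTION_IF_NULL` right after a `std::make_shared` call. The macro's definition is not part of this diff; as a rough illustration, such a guard typically expands to something like the following (a hypothetical stand-in, not the MindSpore macro):

#include <sstream>
#include <stdexcept>

// Hypothetical stand-in for MS_EXCEPTION_IF_NULL: fail fast, with source
// location, when a pointer that must be valid turns out to be null.
#define EXCEPTION_IF_NULL(ptr)                                       \
  do {                                                               \
    if ((ptr) == nullptr) {                                          \
      std::ostringstream oss;                                        \
      oss << "The pointer '" << #ptr << "' is null at " << __FILE__  \
          << ":" << __LINE__;                                        \
      throw std::runtime_error(oss.str());                           \
    }                                                                \
  } while (false)

Note that `std::make_shared` reports allocation failure by throwing `std::bad_alloc` rather than returning null, so checks like these mainly satisfy the project's code checker rather than guard a reachable failure path.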

View File

@@ -34,6 +34,7 @@ void DynamicShapeKernel::Execute() {
   auto output_type = TypeId::kNumberTypeInt64;
   auto output_tensor_for_sync = std::make_shared<tensor::Tensor>(output_type, output_shape);
+  MS_EXCEPTION_IF_NULL(output_tensor_for_sync);
   auto data_ptr = static_cast<int64_t *>(output_tensor_for_sync->data_c());
   for (size_t i = 0; i < prev_output_shape.size(); ++i) {
     MS_LOG(INFO) << "DEBUG prev_output_shape[" << i << "]:" << prev_output_shape[i];

View File

@@ -45,6 +45,9 @@ bool TensorCopySlices::Launch(const std::vector<AddressPtr> &inputs, const std::
     MS_LOG(ERROR) << "outputs size is not 1";
     return false;
   }
+  MS_EXCEPTION_IF_NULL(outputs[0]);
+  MS_EXCEPTION_IF_NULL(inputs[0]);
+  MS_EXCEPTION_IF_NULL(inputs[1]);
   if (outputs[0]->size != inputs[0]->size) {
     MS_LOG(ERROR) << "TensorCopySlices destMax > src size";
     return false;
@@ -137,6 +140,9 @@ std::vector<TaskInfoPtr> TensorCopySlices::GenTask(const std::vector<AddressPtr>
   if (outputs.size() != 1) {
     MS_LOG(EXCEPTION) << "outputs size is not 1.";
   }
+  MS_EXCEPTION_IF_NULL(outputs[0]);
+  MS_EXCEPTION_IF_NULL(inputs[0]);
+  MS_EXCEPTION_IF_NULL(inputs[1]);
   if (outputs[0]->size != inputs[0]->size) {
     MS_LOG(EXCEPTION) << "TensorCopySlices input size " << inputs[0]->size << " is not equal to output size "
                       << outputs[0]->size;

View File

@@ -878,8 +878,6 @@ void GetOutputSizeList(const nlohmann::json &output_json, std::vector<size_t> *o
 bool TbeKernelBuild::GetIOSize(const nlohmann::json &kernel_json, std::vector<size_t> *input_size_list,
                                std::vector<size_t> *output_size_list) {
-  MS_EXCEPTION_IF_NULL(output_size_list);
-  MS_EXCEPTION_IF_NULL(input_size_list);
   if (input_size_list == nullptr || output_size_list == nullptr) {
     MS_LOG(ERROR) << "Input size or output size is nullptr";
     return false;
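This is the one place the commit removes checks instead of adding them: `GetIOSize` already handles null arguments by logging an error and returning `false`, so a preceding `MS_EXCEPTION_IF_NULL` would throw first and turn the recoverable path into dead code. A minimal sketch of the surviving style (names are illustrative):

#include <vector>

// Recoverable-error style kept by this commit: the caller can react to a
// false return instead of unwinding through an exception.
bool GetSizes(const std::vector<size_t> *input_size_list,
              const std::vector<size_t> *output_size_list) {
  if (input_size_list == nullptr || output_size_list == nullptr) {
    // log the problem and let the caller decide, instead of throwing
    return false;
  }
  return true;
}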

View File

@@ -327,6 +327,7 @@ CNodePtr AddCastOpNodeToGraph(const FuncGraphPtr &func_graph, const AnfNodePtr &
                               const abstract::BaseShapePtr &origin_shape, const TypeId &origin_type,
                               const std::string &reshape_type) {
   MS_EXCEPTION_IF_NULL(func_graph);
+  MS_EXCEPTION_IF_NULL(origin_shape);
   std::string input_format = format;
   std::string output_format = format;
   CNodePtr cast = func_graph->NewCNode({NewValueNode(std::make_shared<Primitive>(prim::kPrimCast->name())), input});

View File

@@ -49,6 +49,7 @@ void BatchMatmulFusedMulAddFusionPass::MatchSingleFusionPattern(const session::K
   }
   std::vector<AnfNodePtr> node_list = TopoSort(kernel_graph.get_return());
   for (auto &node : node_list) {
+    MS_EXCEPTION_IF_NULL(node);
     if (!AnfAlgo::IsRealCNodeKernel(node) || fusion_id_allocator->HasFusionIdAttr(node) ||
         AnfAlgo::CheckPrimitiveType(node, prim::kPrimReturn)) {
       continue;

View File

@@ -65,7 +65,7 @@ CNodePtr CreateFusionOp(const std::vector<AnfNodePtr> &inputs_list, const std::v
   MS_LOG(DEBUG) << "Start Create FusionOp Kernel";
   MS_EXCEPTION_IF_NULL(kernel_graph);
   std::string fusion_op_name = "FusionOp";
-  for (auto node : anf_nodes) {
+  for (auto &node : anf_nodes) {
     fusion_op_name += '_' + AnfAlgo::GetCNodeName(node);
   }
   auto fusion_op = std::make_shared<Primitive>(fusion_op_name);
@@ -84,7 +84,8 @@ CNodePtr CreateFusionOp(const std::vector<AnfNodePtr> &inputs_list, const std::v
   ValuePtr output_names_v = MakeValue(output_names);
   fusion_op->set_attr("input_names", input_names_v);
   fusion_op->set_attr("output_names", output_names_v);
-  for (auto node : anf_nodes) {
+  for (auto &node : anf_nodes) {
+    MS_EXCEPTION_IF_NULL(node);
     auto cnode = node->cast<CNodePtr>();
     if (AnfAlgo::HasNodeAttr(kAttrFracZGroup, cnode)) {
       auto fracz_group = AnfAlgo::GetNodeAttr<int64_t>(node, kAttrFracZGroup);
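Both loops here switch from `for (auto node : anf_nodes)` to `for (auto &node : anf_nodes)`: binding by value copies a `std::shared_ptr` per element, paying an atomic reference-count increment and decrement on every iteration, while a reference binding is free. A self-contained sketch of the difference (the types are illustrative, not MindSpore's):

#include <memory>
#include <string>
#include <vector>

struct Node { std::string name; };
using NodePtr = std::shared_ptr<Node>;

std::string JoinNames(const std::vector<NodePtr> &nodes) {
  std::string joined = "FusionOp";
  // Reference binding: no shared_ptr copy, no refcount traffic per element.
  for (const auto &node : nodes) {
    joined += '_' + node->name;
  }
  return joined;
}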

View File

@@ -132,6 +132,7 @@ const BaseRef ConcatOutputsForAllGather::DefinePattern() const {
 const AnfNodePtr ConcatOutputsForAllGather::Process(const FuncGraphPtr &func_graph, const AnfNodePtr &node,
                                                     const EquivPtr &) const {
   MS_EXCEPTION_IF_NULL(func_graph);
+  MS_EXCEPTION_IF_NULL(node);
   auto cnode = node->cast<CNodePtr>();
   MS_EXCEPTION_IF_NULL(cnode);

View File

@@ -62,6 +62,7 @@ const AnfNodePtr GetnextTensorMoveElimination::Process(const FuncGraphPtr &graph
   // 3. next_node is not nop node, not communication node, not graph output and it has only one input which is tensor
   // move's output
   for (auto &item : next_nodes) {
+    MS_EXCEPTION_IF_NULL(item.first);
     auto next_node = item.first->cast<CNodePtr>();
     if (opt::IsNopNode(next_node)) {
       return nullptr;

View File

@@ -100,6 +100,7 @@ AnfNodePtr InsertTensorMoveForCascade::InsertTensorMove(const FuncGraphPtr &grap
   if (!tensor_move_list.empty()) {
     CNodePtr new_hccl_node = std::make_shared<CNode>(*hccl_node);
+    MS_EXCEPTION_IF_NULL(new_hccl_node);
     new_hccl_node->set_inputs(new_inputs);
     return new_hccl_node;
   }

View File

@@ -48,6 +48,7 @@ bool IsNodeOutPutUsedByOtherRealKernel(const AnfNodeIndexSet &node_users) {
   }
   for (const auto &node_pair : node_users) {
     auto node = node_pair.first;
+    MS_EXCEPTION_IF_NULL(node);
     if (AnfAlgo::IsRealKernel(node) && !AnfAlgo::IsCommunicationOp(node)) {
       MS_LOG(INFO) << "This node only used other real kernel: " << node->fullname_with_scope();
       return true;

View File

@@ -164,6 +164,7 @@ CNodePtr DealRefAndSpiltUnSupportedTransdata::MakeDependency(const CNodePtr &get
 }
 CNodePtr DealRefAndSpiltUnSupportedTransdata::DealRefForMultipleOutput(
   const FuncGraphPtr &func_graph, const CNodePtr &orig_cnode, const std::shared_ptr<kernel::OpInfo> &op_info) const {
+  MS_EXCEPTION_IF_NULL(func_graph);
   auto manager = func_graph->manager();
   MS_EXCEPTION_IF_NULL(manager);
   auto cnode = orig_cnode;
@@ -172,6 +173,7 @@ CNodePtr DealRefAndSpiltUnSupportedTransdata::DealRefForMultipleOutput(
     auto kernel_graph = func_graph->cast<KernelGraphPtr>();
     MS_EXCEPTION_IF_NULL(kernel_graph);
     cnode = kernel_graph->NewCNode(orig_cnode);
+    MS_EXCEPTION_IF_NULL(cnode);
     cnode->set_inputs(orig_cnode->inputs());
     for (auto &update_state : update_states) {
       manager->SetEdge(update_state.first, update_state.second, cnode);
@@ -181,7 +183,7 @@ CNodePtr DealRefAndSpiltUnSupportedTransdata::DealRefForMultipleOutput(
   auto ref_infos = op_info->ref_infos();
   std::vector<AnfNodePtr> make_tuple_inputs;
   AbstractBasePtrList abstract_list;
-  make_tuple_inputs.push_back(NewValueNode(prim::kPrimMakeTuple));
+  make_tuple_inputs.emplace_back(NewValueNode(prim::kPrimMakeTuple));
   size_t output_num = AnfAlgo::GetOutputTensorNum(cnode);
   for (size_t output_index = 0; output_index < output_num; ++output_index) {
     CNodePtr final_node = CreatTupleGetItemNode(func_graph, cnode, output_index);
@@ -284,13 +286,16 @@ CNodePtr DealRefAndSpiltUnSupportedTransdata::SplitTransdataIfNotSupported(const
   // When input and output format are all special format
   // the node should be split into two transdata connected by default format
   auto builder_info_to_default = std::make_shared<kernel::KernelBuildInfo::KernelBuildInfoBuilder>(kernel_info);
+  MS_EXCEPTION_IF_NULL(builder_info_to_default);
   auto builder_info_to_special_foramt = std::make_shared<kernel::KernelBuildInfo::KernelBuildInfoBuilder>(kernel_info);
+  MS_EXCEPTION_IF_NULL(builder_info_to_special_foramt);
   builder_info_to_default->SetOutputsFormat({kOpFormat_DEFAULT});
   builder_info_to_special_foramt->SetInputsFormat({kOpFormat_DEFAULT});
   std::vector<AnfNodePtr> next_trans_node_inputs = {
     NewValueNode(std::make_shared<Primitive>(prim::kPrimTransData->name())), cnode};
   MS_EXCEPTION_IF_NULL(func_graph);
   auto next_trans_node = func_graph->NewCNode(next_trans_node_inputs);
+  MS_EXCEPTION_IF_NULL(next_trans_node);
   next_trans_node->set_abstract(cnode->abstract());
   AnfAlgo::SetSelectKernelBuildInfo(builder_info_to_default->Build(), cnode.get());
   AnfAlgo::SetSelectKernelBuildInfo(builder_info_to_special_foramt->Build(), next_trans_node.get());

View File

@@ -51,6 +51,9 @@ int64_t GetInterSection(int64_t start_1, int64_t end_1, int64_t start_2, int64_t
 bool GetKernelSize(const AnfNodePtr &node, int64_t *kd, int64_t *kh, int64_t *kw) {
   MS_EXCEPTION_IF_NULL(node);
+  MS_EXCEPTION_IF_NULL(kd);
+  MS_EXCEPTION_IF_NULL(kh);
+  MS_EXCEPTION_IF_NULL(kw);
   if (AnfAlgo::HasNodeAttr("kernel_size", node->cast<CNodePtr>())) {
     auto kernel_size = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(node, "kernel_size");
     if (kernel_size.size() == 1) {
@@ -76,6 +79,9 @@ bool GetKernelSize(const AnfNodePtr &node, int64_t *kd, int64_t *kh, int64_t *kw
 bool GetStrideSize(const AnfNodePtr &node, int64_t *sd, int64_t *sh, int64_t *sw) {
   MS_EXCEPTION_IF_NULL(node);
+  MS_EXCEPTION_IF_NULL(sd);
+  MS_EXCEPTION_IF_NULL(sh);
+  MS_EXCEPTION_IF_NULL(sw);
   if (AnfAlgo::HasNodeAttr("strides", node->cast<CNodePtr>())) {
     auto kernel_size = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(node, "strides");
     if (kernel_size.size() == 1) {
@@ -164,6 +170,7 @@ AnfNodePtr ConstructFilter(const FuncGraphPtr &func_graph, const std::vector<int
   auto x_abstract = std::make_shared<abstract::AbstractTensor>(kFloat16, assist_shape);
   auto kernel_graph = func_graph->cast<KernelGraphPtr>();
+  MS_EXCEPTION_IF_NULL(kernel_graph);
   auto value_node = kernel_graph->NewValueNode(x_abstract, assist_tensor);
   kernel_graph->AddValueNodeToGraph(value_node);
   AnfAlgo::SetOutputInferTypeAndShape({kNumberTypeFloat16}, {infer_shape}, value_node.get());
@@ -179,6 +186,7 @@ AnfNodePtr ConstructMultiplier(const FuncGraphPtr &func_graph, int64_t fn, int64
   std::vector<int64_t> assist_shape = {fn, fc, dd, dh, dw};  // NCDHW
   auto infer_shape = {LongToSize(fn), LongToSize(fc), LongToSize(dd), LongToSize(dh), LongToSize(dw)};
   tensor::TensorPtr tensor = std::make_shared<tensor::Tensor>(kNumberTypeFloat16, assist_shape);
+  MS_EXCEPTION_IF_NULL(tensor);
   auto tensor_data = reinterpret_cast<float16 *>(tensor->data_c());
   auto pad_d = pad_list[kDim0] + pad_list[kDim1];
   auto pad_h = pad_list[kDim2] + pad_list[kDim3];

View File

@@ -132,6 +132,7 @@ AnfNodePtr ConstructFilter(const FuncGraphPtr &func_graph, const std::vector<int
   auto x_abstract = std::make_shared<abstract::AbstractTensor>(kFloat16, assist_shape);
   auto kernel_graph = func_graph->cast<KernelGraphPtr>();
   auto value_node = kernel_graph->NewValueNode(x_abstract, assist_tensor);
+  MS_EXCEPTION_IF_NULL(value_node);
   kernel_graph->AddValueNodeToGraph(value_node);
   AnfAlgo::SetOutputInferTypeAndShape({kNumberTypeFloat16}, {infer_shape}, value_node.get());
   return value_node;
@@ -147,6 +148,7 @@ AnfNodePtr ConstructMultiplier(const FuncGraphPtr &func_graph, const std::vector
   (void)std::transform(ori_shape.begin(), ori_shape.end(), std::back_inserter(grad_shape), SizeToLong);
   std::vector<int64_t> assist_shape = grad_shape;  // NCDHW
   tensor::TensorPtr tensor = std::make_shared<tensor::Tensor>(kNumberTypeFloat16, assist_shape);
+  MS_EXCEPTION_IF_NULL(tensor);
   auto tensor_data = reinterpret_cast<float16 *>(tensor->data_c());
   auto pad_d = pad_list[kDim0] + pad_list[kDim1];
   auto pad_h = pad_list[kDim2] + pad_list[kDim3];
@@ -162,23 +164,26 @@ AnfNodePtr ConstructMultiplier(const FuncGraphPtr &func_graph, const std::vector
       for (int64_t hi = 0; hi < grad_shape[kDim3]; hi++) {
         int64_t start_w = 0;
         for (int64_t wi = 0; wi < grad_shape[kDim4]; wi++) {
-          int64_t vaild_d = 0;
-          int64_t vaild_h = 0;
-          int64_t vaild_w = 0;
+          int64_t valid_d = 0;
+          int64_t valid_h = 0;
+          int64_t valid_w = 0;
           if (count_include_pad) {
-            vaild_d = start_d + kernel_size[kDim0] <= len_d ? kernel_size[kDim0] : len_d - start_d;
-            vaild_h = start_h + kernel_size[kDim1] <= len_h ? kernel_size[kDim1] : len_h - start_h;
-            vaild_w = start_w + kernel_size[kDim2] <= len_w ? kernel_size[kDim2] : len_w - start_w;
+            valid_d = start_d + kernel_size[kDim0] <= len_d ? kernel_size[kDim0] : len_d - start_d;
+            valid_h = start_h + kernel_size[kDim1] <= len_h ? kernel_size[kDim1] : len_h - start_h;
+            valid_w = start_w + kernel_size[kDim2] <= len_w ? kernel_size[kDim2] : len_w - start_w;
           } else {
-            vaild_d = std::min(start_d + kernel_size[kDim0], pad_list[kDim0] + ori_input_shape[kDim2]) -
+            valid_d = std::min(start_d + kernel_size[kDim0], pad_list[kDim0] + ori_input_shape[kDim2]) -
                       std::max(pad_list[kDim0], start_d);
-            vaild_h = std::min(start_h + kernel_size[kDim1], pad_list[kDim2] + ori_input_shape[kDim3]) -
+            valid_h = std::min(start_h + kernel_size[kDim1], pad_list[kDim2] + ori_input_shape[kDim3]) -
                       std::max(pad_list[kDim2], start_h);
-            vaild_w = std::min(start_w + kernel_size[kDim2], pad_list[kDim4] + ori_input_shape[kDim4]) -
+            valid_w = std::min(start_w + kernel_size[kDim2], pad_list[kDim4] + ori_input_shape[kDim4]) -
                       std::max(pad_list[kDim4], start_w);
           }
-          auto vaild_data = vaild_d * vaild_h * vaild_w;
-          float val = 1.0 / vaild_data;
+          auto valid_data = valid_d * valid_h * valid_w;
+          if (valid_data == 0) {
+            MS_LOG(EXCEPTION) << "Divisor 'valid_data' should not be 0.";
+          }
+          float val = 1.0 / valid_data;
           *tensor_data = float16(val);
           ++tensor_data;
           start_w += strides[kDim2];
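The `valid_data` guard added above matters because when `count_include_pad` is false, a pooling window that overlaps only padding contributes zero valid elements, and `1.0 / valid_data` would otherwise divide by zero. A reduced sketch of the guarded computation (the function name is illustrative):

#include <cstdint>
#include <stdexcept>

// Scale factor for one pooling window: 1 / (number of valid elements).
float WindowScale(int64_t valid_d, int64_t valid_h, int64_t valid_w) {
  const int64_t valid_data = valid_d * valid_h * valid_w;
  if (valid_data == 0) {
    throw std::runtime_error("Divisor 'valid_data' should not be 0.");
  }
  return 1.0f / static_cast<float>(valid_data);
}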

View File

@@ -70,9 +70,7 @@ ValueNodePtr CreateKeepPorbValueNode(const FuncGraphPtr &func_graph, const AnfNo
   if (!AnfAlgo::HasNodeAttr(kKeepProb, cnode)) {
     MS_LOG(EXCEPTION) << "Dropout node does not have attr: keep_prob.";
   }
-  auto prim = AnfAlgo::GetCNodePrimitive(cnode);
-  MS_EXCEPTION_IF_NULL(prim);
-  if (prim->ToString() == kDropoutOpName) {
+  if (AnfAlgo::GetCNodeName(cnode) == kDropoutOpName) {
     if (!AnfAlgo::HasNodeAttr(kSeed0, cnode) || !AnfAlgo::HasNodeAttr(kSeed1, cnode)) {
       MS_LOG(EXCEPTION) << "Dropout node does not have attr: seed0 or seed1.";
     }
@@ -127,6 +125,7 @@ bool NeedUpdate(const CNodePtr &getitem_cnode) {
 CNodePtr CreateDynamicShapeCNode(const FuncGraphPtr &func_graph, const AnfNodePtr &node_input,
                                  const abstract::ShapePtr &input_shape) {
   MS_EXCEPTION_IF_NULL(func_graph);
+  MS_EXCEPTION_IF_NULL(input_shape);
   std::vector<AnfNodePtr> dynamic_shape_inputs{NewValueNode(std::make_shared<Primitive>("DynamicShape")), node_input};
   CNodePtr dynamic_shape = func_graph->NewCNode(dynamic_shape_inputs);
   MS_EXCEPTION_IF_NULL(dynamic_shape);
@@ -135,6 +134,7 @@ CNodePtr CreateDynamicShapeCNode(const FuncGraphPtr &func_graph, const AnfNodePt
     std::make_shared<abstract::AbstractTensor>(kInt64, std::make_shared<abstract::Shape>(tensor_shp));
   auto max_value = MakeValue(input_shape->max_shape());
   auto min_value = MakeValue(input_shape->min_shape());
+  MS_EXCEPTION_IF_NULL(dynamic_shape_abstract);
   dynamic_shape_abstract->set_value_range(min_value, max_value);
   dynamic_shape->set_abstract(dynamic_shape_abstract);
   return dynamic_shape;
@@ -145,6 +145,7 @@ CNodePtr CreateDropoutGenMaskCNode(const FuncGraphPtr &func_graph, const AnfNode
                                    const abstract::ShapePtr &input_shape) {
   MS_EXCEPTION_IF_NULL(func_graph);
   MS_EXCEPTION_IF_NULL(dropout);
+  MS_EXCEPTION_IF_NULL(input_shape);
   std::vector<AnfNodePtr> dropout_gen_mask_inputs{NewValueNode(std::make_shared<Primitive>(kDropoutGenMaskOpName))};
   if (input_shape->IsDynamic()) {
     CNodePtr dynamic_shape = CreateDynamicShapeCNode(func_graph, dropout_input, input_shape);
@@ -233,6 +234,7 @@ const AnfNodePtr DropoutAndDropoutGradUnifyMindIR::Process(const FuncGraphPtr &f
   if (iter != node_users.end()) {
     for (auto &node_index : iter->second) {
       auto used_node = node_index.first;
+      MS_EXCEPTION_IF_NULL(used_node);
       if (AnfAlgo::CheckPrimitiveType(used_node, prim::kPrimTupleGetItem)) {
         // check if Dropout's first output, which is used by forward, is used
         if (AnfAlgo::GetTupleGetItemOutIndex(used_node->cast<CNodePtr>()) == 0) {

View File

@@ -292,7 +292,7 @@ CNodePtr CreateTile(const FuncGraphPtr &graph, const CNodePtr &sparse_softmax_no
   }
   // feature map set
   std::vector<size_t> feature_map_input_indexs;
-  feature_map_input_indexs.emplace_back(0);
+  (void)feature_map_input_indexs.emplace_back(0);
   AnfAlgo::SetNodeAttr(kIsFeatureMapInputList, MakeValue(feature_map_input_indexs), tile_node);
   return tile_node;
 }

View File

@@ -76,6 +76,7 @@ void InsertCast(const FuncGraphPtr &func_graph, const CNodePtr &cnode) {
       origin_type = AnfAlgo::GetOutputInferDataType(prev_node.first, prev_node.second);
     }
     auto cur_input = AnfAlgo::GetInputNode(cnode, input_index);
+    MS_EXCEPTION_IF_NULL(cur_input);
     if (cur_input->isa<Parameter>() && AnfAlgo::IsParameterWeight(cur_input->cast<ParameterPtr>())) {
       continue;
     }

View File

@@ -181,8 +181,11 @@ AnfNodePtr AnfRuntimeAlgorithm::MakeMonadValueNode(const KernelGraphPtr &kg) {
 // ...
 // out = Depend(out, latter)
 void AnfRuntimeAlgorithm::KeepOrder(const KernelGraphPtr &kg, const AnfNodePtr &former, const AnfNodePtr &latter) {
+  MS_EXCEPTION_IF_NULL(kg);
+  MS_EXCEPTION_IF_NULL(latter);
   if (latter->isa<CNode>()) {
     auto latter_cnode = latter->cast<CNodePtr>();
+    MS_EXCEPTION_IF_NULL(latter_cnode);
     constexpr size_t inputsize = 2;
     constexpr size_t kFirstDataInputIndex = 1;
     if (latter_cnode->inputs().size() < inputsize) {
@@ -190,6 +193,7 @@ void AnfRuntimeAlgorithm::KeepOrder(const KernelGraphPtr &kg, const AnfNodePtr &
     }
     auto latter_input = latter_cnode->input(kFirstDataInputIndex);
     auto depend1 = kg->NewCNode({NewValueNode(prim::kPrimDepend), latter_input, former});
+    MS_EXCEPTION_IF_NULL(depend1);
     depend1->set_abstract(latter_input->abstract());
     latter_cnode->set_input(kFirstDataInputIndex, depend1);
@@ -197,6 +201,7 @@ void AnfRuntimeAlgorithm::KeepOrder(const KernelGraphPtr &kg, const AnfNodePtr &
   MS_EXCEPTION_IF_NULL(return_node);
   auto depend2 = kg->NewCNode(
     {NewValueNode(prim::kPrimDepend), return_node->cast<CNodePtr>()->input(kFirstDataInputIndex), latter});
+  MS_EXCEPTION_IF_NULL(depend2);
   depend2->set_abstract(return_node->cast<CNodePtr>()->input(kFirstDataInputIndex)->abstract());
   kg->set_output(depend2);
   MS_LOG(DEBUG) << "former: " << former->DebugString() << ", latter: " << latter->DebugString()
@@ -394,6 +399,7 @@ std::vector<KernelWithIndex> AnfRuntimeAlgorithm::GetAllOutputWithIndex(const An
     // Ignore the output of front call node.
     if (output_with_index.first->isa<CNode>()) {
       auto cnode = output_with_index.first->cast<CNodePtr>();
+      MS_EXCEPTION_IF_NULL(cnode);
       auto inputs = cnode->inputs();
       if (inputs[0]->isa<CNode>()) {
         MS_LOG(INFO) << "The output is call node: " << output_with_index.first->DebugString();
@@ -1158,6 +1164,7 @@ DeviceAddressPtr AnfRuntimeAlgorithm::GetMutableWorkspaceAddr(const AnfNodePtr &
 abstract::BaseShapePtr AnfRuntimeAlgorithm::GetOutputDetailShape(const AnfNodePtr &node, size_t output_idx) {
   MS_EXCEPTION_IF_NULL(node);
   auto base_shape = node->Shape();
+  MS_EXCEPTION_IF_NULL(base_shape);
   if (base_shape->isa<abstract::Shape>()) {
     if (output_idx == 0) {
       return base_shape;
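`KeepOrder` relies on `Depend` nodes, as the comment at the top of this hunk sketches: a `Depend` forwards its first input's value while adding a scheduling-only edge to its second input, so `former` is forced to execute before `latter` even though no data flows between them. A minimal graph-node model of the idea (a standalone illustration, not the MindSpore API):

#include <memory>
#include <utility>
#include <vector>

struct Node {
  std::vector<std::shared_ptr<Node>> inputs;  // inputs[0] carries the value
};

// Depend(value, after): keep the value edge, add an ordering edge to 'after'
// so any scheduler that respects input edges runs 'after' first.
std::shared_ptr<Node> Depend(std::shared_ptr<Node> value, std::shared_ptr<Node> after) {
  auto depend = std::make_shared<Node>();
  depend->inputs = {std::move(value), std::move(after)};
  return depend;
}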

View File

@@ -535,6 +535,7 @@ class CallInfoFinder {
   }
   void DoSearchRecursiveCall(const KernelGraphPtr &graph, const CallSite &call_site, SearchRecursiveContext *ctx) {
+    MS_EXCEPTION_IF_NULL(ctx);
     // Record call path.
     ctx->call_path.push_back(graph);
     // Handle callee graphs.
@@ -547,6 +548,7 @@ class CallInfoFinder {
         context_.call_info_map[g].recursive = true;
       }
       // Mark recursive for the start call-site.
+      MS_EXCEPTION_IF_NULL(ctx->start_site);
       ctx->start_site->recursive = true;
       continue;
     }
@@ -742,6 +744,7 @@ class AscendAutoMonadConverter {
   // Set iteration end points for Profiling.
   static void SetIterEndAttrForTopGraph(AscendAutoMonadContext *context, const KernelGraphPtr &kg) {
+    MS_EXCEPTION_IF_NULL(kg);
     kg->SetExecOrderByDefault();
     auto &nodes = kg->execution_order();
     auto end_iter = nodes.rend();
@@ -777,6 +780,7 @@ class AscendAutoMonadConverter {
   // Set Attr to the iter-end points.
   static void SetIterEndAttr(AscendAutoMonadContext *context, const KernelGraphPtr &kg, bool has_call_site) {
+    MS_EXCEPTION_IF_NULL(kg);
     kg->SetExecOrderByDefault();
     auto &nodes = kg->execution_order();
     auto end_iter = nodes.rend();
@@ -807,6 +811,7 @@ class AscendAutoMonadConverter {
   // Find all iteration end points recursively.
   static void FindProfilingEndPoints(AscendAutoMonadContext *context, const KernelGraphPtr &kg,
                                      std::set<KernelGraphPtr> *memo) {
+    MS_EXCEPTION_IF_NULL(memo);
     memo->insert(kg);
     auto call_info = context->call_info_map[kg];
     // 1. find the last call site; if no call site, goto step 3.
@@ -833,6 +838,7 @@ class AscendAutoMonadConverter {
   void InitStack() {
     if (!context_.HasInitedStack() && need_stackops_) {
       auto top_graph = context_.TopGraph();
+      MS_EXCEPTION_IF_NULL(top_graph);
       auto exec_order = top_graph->execution_order();
       auto stack_init = StackInit(top_graph);
       AnfAlgo::KeepOrder(top_graph, stack_init, *exec_order.begin());
@@ -879,6 +885,7 @@ class AscendAutoMonadConverter {
   // Find nodes which need StackOps, and insert StackOps for node.
   void FindInputNode(const std::vector<AnfNodePtr> &before_nodes, const CNodePtr &node,
                      std::vector<CNodePtr> *stack_pushs) {
+    MS_EXCEPTION_IF_NULL(node);
     uint32_t start_index = 1;
     if (AnfAlgo::CheckPrimitiveType(node, prim::kPrimAssign)) {
       start_index = kInputIndex;
@@ -889,6 +896,7 @@ class AscendAutoMonadConverter {
       if (HasAbstractMonad(node_input)) {
         continue;
       }
+      MS_EXCEPTION_IF_NULL(node_input);
      MS_LOG(DEBUG) << "check node input[" << i << "]: " << node_input->DebugString();
      if (node_input->isa<Parameter>()) {
        MS_LOG(DEBUG) << "node_input:" << node_input->DebugString() << " is a param";
@@ -909,9 +917,12 @@ class AscendAutoMonadConverter {
   // Create StackOps for node_input.
   CNodePtr InsertStackPop(const AnfNodePtr &node_input, std::vector<CNodePtr> *stack_pushs) {
+    MS_EXCEPTION_IF_NULL(node_input);
+    MS_EXCEPTION_IF_NULL(stack_pushs);
     auto stack_push = StackPush(node_input);
     stack_pushs->emplace_back(stack_push);
     auto stack_pop = StackPop();
+    MS_EXCEPTION_IF_NULL(stack_pop);
     stack_pop->set_abstract(node_input->abstract());
     return stack_pop;
   }

View File

@@ -317,6 +317,7 @@ bool NeedMemcpyInDevice(const device::DeviceAddressPtr &src_device_addr,
 bool TensorNeedSync(const std::shared_ptr<KernelGraph> &kernel_graph, const AnfNodePtr &parameter,
                     const tensor::TensorPtr &tensor, uint32_t *memcpy_nums) {
+  MS_EXCEPTION_IF_NULL(tensor);
   if (tensor->NeedSyncHostToDevice()) {
     return true;
   }

View File

@@ -102,6 +102,7 @@ void CPUSession::Optimize(const std::shared_ptr<KernelGraph> &kernel_graph) {
 void CPUSession::ProcessCast(const std::shared_ptr<KernelGraph> &kernel_graph) {
   auto optimizer = std::make_shared<opt::GraphOptimizer>();
   auto pm = std::make_shared<opt::PassManager>();
+  MS_EXCEPTION_IF_NULL(pm);
   pm->AddPass(std::make_shared<opt::InsertCastCPU>("insert_cast_cpu"));
   MS_LOG(INFO) << "Insert cast pass";
   pm->AddPass(std::make_shared<opt::EraseVisitAttr>());
@@ -263,6 +264,7 @@ void CPUSession::UpdateDynamicOutputShape(const std::map<tensor::TensorPtr, Kern
     const auto &shape = AnfAlgo::GetOutputInferShape(kernel, output_index);
     std::vector<int64_t> refresh_shape;
     (void)std::copy(shape.begin(), shape.end(), std::back_inserter(refresh_shape));
+    MS_EXCEPTION_IF_NULL(tensor_node.first);
     tensor_node.first->set_shape(refresh_shape);
   }
 }

View File

@@ -1370,6 +1370,7 @@ void KernelGraph::SetOptimizerFlag() {
     for (auto &input : cnode->inputs()) {
       MS_EXCEPTION_IF_NULL(input);
       auto real_node = AnfAlgo::VisitKernel(input, 0).first;
+      MS_EXCEPTION_IF_NULL(real_node);
       if (!real_node->isa<Parameter>()) {
         continue;
       }

View File

@@ -226,6 +226,7 @@ BaseRef CreateNodeOutputTensor(const session::KernelWithIndex &node_output_pair,
   } else {
     tensor = std::make_shared<tensor::Tensor>(type_id, temp_shape);
   }
+  MS_EXCEPTION_IF_NULL(tensor);
   tensor->set_padding_type(AnfAlgo::GetOutputReshapeType(node, output_index));
   if (is_internal_output) {
     tensor->set_sync_status(kNoNeedSync);
@@ -436,6 +437,7 @@ BaseRef CreateNodeOutputPlaceholder(const AnfNodePtr &anf, const KernelGraphPtr
 }
 void CheckInputTensorShape(const TensorPtr &tensor, const CNodePtr &kernel, size_t input_index) {
+  MS_EXCEPTION_IF_NULL(tensor);
   const auto &tensor_shape = tensor->shape();
   const auto input_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel, input_index);
   if (tensor_shape.size() != input_shape.size()) {

View File

@@ -359,6 +359,7 @@ void SetCastAndWeightFormat(const CNodePtr &kernel_node) {
 void SetWeightFormat(const AnfNodePtr &real_input_node, std::vector<string> output_format, const CNodePtr &kernel_node,
                      size_t input_index, bool force_fresh = false) {
+  MS_EXCEPTION_IF_NULL(real_input_node);
   if (real_input_node->isa<CNode>() || AnfAlgo::OutputAddrExist(real_input_node, 0)) {
     return;
   }

View File

@@ -186,6 +186,7 @@ DeviceAddressPtr CPUKernelRuntime::CreateDeviceAddress(void *device_ptr, size_t
 tensor::TensorPtr CPUKernelRuntime::CreatTensorForOutput(
   session::KernelGraph *kernel_graph, const CNodePtr &node, size_t index,
   std::map<tensor::TensorPtr, session::KernelWithIndex> *tensor_to_node) {
+  MS_EXCEPTION_IF_NULL(kernel_graph);
   MS_EXCEPTION_IF_NULL(node);
   MS_EXCEPTION_IF_NULL(tensor_to_node);
   size_t output_size = AnfAlgo::GetOutputTensorNum(node);

View File

@@ -43,9 +43,10 @@ bool IsInputNotCNode(const CNodePtr &kernel_node, size_t input_index) {
 }
 void UpdatePrevNotCNodeFormatDtype(const KernelAttr &kernel_attr, const std::vector<size_t> &input_not_cnode_indexes,
-                                   const CNodePtr kernel_node) {
+                                   const CNodePtr &kernel_node) {
   for (auto &input_index : input_not_cnode_indexes) {
     auto input_node = AnfAlgo::VisitKernel(kernel_node->input(input_index + 1), 0).first;
+    MS_EXCEPTION_IF_NULL(input_node);
     if (input_node->isa<Parameter>() && AnfAlgo::IsParameterWeight(input_node->cast<ParameterPtr>())) {
       MS_EXCEPTION_IF_NULL(input_node);
       std::vector<TypeId> output_types;