!21053 code check clean

Merge pull request !21053 from yuchaojie/code-clean
i-robot 2021-07-30 09:50:10 +00:00 committed by Gitee
commit 92362f8c94
8 changed files with 41 additions and 29 deletions

View File

@@ -51,7 +51,7 @@ std::string GetKernelFormat(const CNodePtr &kernel_node, size_t index) {
       return kOpFormat_DEFAULT;
     }
   }
-  if (format == kOpFormat_FRAC_NZ && input_shape.size() <= 2) {
+  if (format == kOpFormat_FRAC_NZ && input_shape.size() <= kShape2dDims) {
     return kOpFormat_DEFAULT;
   }
   if (kReduceNoSupportedSet.find(format) != kReduceNoSupportedSet.end()) {
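
Most hunks in this merge follow the same lint-driven pattern as the one above: a bare numeric literal is replaced by a named constant so the call site states its intent. A minimal standalone sketch of the idea, assuming kShape2dDims is defined as in the last file of this diff (the helper function itself is hypothetical, not part of the commit):

#include <cstddef>
#include <vector>

// Mirrors the constant added to the shared header in the last file below.
constexpr size_t kShape2dDims = 2;

// Hypothetical helper: comparing against kShape2dDims instead of a bare `2`
// makes clear the check is a rank bound, not an arbitrary magic number.
bool IsAtMost2D(const std::vector<size_t> &input_shape) {
  return input_shape.size() <= kShape2dDims;
}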

View File

@@ -58,8 +58,8 @@ kernel::KernelBuildInfoPtr GenerateKernelBuildInfo(const AnfNodePtr &concat, con
   size_t concat_input_num = AnfAlgo::GetInputTensorNum(concat);
   for (size_t i = 0; i < concat_input_num; ++i) {
     size_t input_index = allgather_input_idx + i * allgather_input_num;
-    inputs_device_format.emplace_back(std::get<2>(allgather_output_info)[input_index]);
-    inputs_device_type.emplace_back(std::get<3>(allgather_output_info)[input_index]);
+    inputs_device_format.emplace_back(std::get<kIndex2>(allgather_output_info)[input_index]);
+    inputs_device_type.emplace_back(std::get<kIndex3>(allgather_output_info)[input_index]);
   }
   // Current only support default format & float16
   auto cmp_format = inputs_device_format.begin();

View File

@@ -20,6 +20,9 @@
 #include "frontend/optimizer/opt.h"
 namespace mindspore::opt {
+namespace {
+constexpr size_t kTensorMoveNextNodeInputSize = 2;
+}  // namespace
 const BaseRef GetnextTensorMoveElimination::DefinePattern() const {
   auto prim_tensor_move = std::make_shared<Primitive>(kTensorMoveOpName);
@@ -74,7 +77,7 @@ const AnfNodePtr GetnextTensorMoveElimination::Process(const FuncGraphPtr &graph
     return nullptr;
   }
-  if (next_node->inputs().size() != 2) {
+  if (next_node->inputs().size() != kTensorMoveNextNodeInputSize) {
     MS_LOG(DEBUG) << "next node has more than one input";
     return nullptr;
   }

View File

@@ -104,8 +104,7 @@ CNodePtr DealRefAndSpiltUnSupportedTransdata::AddAdditionalToRefOutput(const Fun
   bool need_refresh_ref_addr = false;
   size_t final_index = output_index;
   AnfNodePtr input_node = AnfAlgo::GetInputNode(cnode, input_index);
-  session::KernelWithIndex origin_pair;
-  origin_pair = FindRefOriginNode(input_node);
+  session::KernelWithIndex origin_pair = FindRefOriginNode(input_node);
   MS_EXCEPTION_IF_NULL(origin_pair.first);
   if (!origin_pair.first->isa<Parameter>()) {
     MS_LOG(WARNING) << "ref op origin node is not parameter";

View File

@@ -27,6 +27,13 @@ namespace {
 constexpr size_t kDynamicRNNGradInputNum = 16;
 constexpr size_t kSplitVOutputNum = 2;
 constexpr size_t kBasicCellOutputNum = 2;
+constexpr size_t kBasicLstmCStateGradOutput0DimNum = 3;
+constexpr int64_t kAttrNValue = 2;
+constexpr int64_t kAttrDynInputSizesValue = 2;
+constexpr int64_t kAttrAxis2Value = 2;
+constexpr int64_t kAttrNumSplitValue = 2;
+constexpr int64_t kAttrSplitDimValue = 2;
+constexpr size_t kDimMultiNum = 4;
 void CreateTLoopNode(const FuncGraphPtr &func_graph, const CNodePtr &dynamic_rnn_grad_cnode,
                      std::vector<std::vector<AnfNodePtr>> *result_nodes) {
@@ -47,8 +54,9 @@ void CreateTLoopNode(const FuncGraphPtr &func_graph, const CNodePtr &dynamic_rnn
       NewValueNode(std::make_shared<Primitive>(kBasicLSTMCellCStateGradV2OpName))};
     auto basic_lstm_cell_c_state_grad = func_graph->NewCNode(basic_lstm_cell_c_state_grad_inputs);
-    std::vector<size_t> output0_dims{origin_input9_shape[kDim0],
-                                     4 * (((origin_input9_shape[kDim1] + kCubeSize - 1) / kCubeSize) * kCubeSize)};
+    std::vector<size_t> output0_dims{
+      origin_input9_shape[kDim0],
+      kDimMultiNum * (((origin_input9_shape[kDim1] + kCubeSize - 1) / kCubeSize) * kCubeSize)};
     std::vector<size_t> output1_dims{input_i_shape[kDim1], input_i_shape[kDim2]};
     AnfAlgo::SetOutputInferTypeAndShape({kNumberTypeFloat16, kNumberTypeFloat32}, {output0_dims, output1_dims},
                                         basic_lstm_cell_c_state_grad.get());
@@ -79,8 +87,8 @@ void CreateTLoopNode(const FuncGraphPtr &func_graph, const CNodePtr &dynamic_rnn
                            SizeToLong((origin_output2_shape[kDim2] + kCubeSize - 1) / kCubeSize * kCubeSize),
                            SizeToLong((origin_output3_shape[kDim1] + kCubeSize - 1) / kCubeSize * kCubeSize)}),
       split_v);
-    AnfAlgo::SetNodeAttr(kAttrSplitDim, MakeValue(static_cast<int64_t>(2)), split_v);
-    AnfAlgo::SetNodeAttr(kAttrNumSplit, MakeValue(static_cast<int64_t>(2)), split_v);
+    AnfAlgo::SetNodeAttr(kAttrSplitDim, MakeValue(static_cast<int64_t>(kAttrSplitDimValue)), split_v);
+    AnfAlgo::SetNodeAttr(kAttrNumSplit, MakeValue(static_cast<int64_t>(kAttrNumSplitValue)), split_v);
     basic_lstm_cell_c_state_grad_nodes.emplace_back(basic_lstm_cell_c_state_grad);
     matmul_nodes.emplace_back(matmul);
@@ -242,7 +250,6 @@ AnfNodePtr AddLSTMInputGradNode(const FuncGraphPtr &func_graph, const CNodePtr &
     auto basic_lstm_cell_c_state_grad_outputs_0_shape =
       AnfAlgo::GetOutputInferShape(basic_lstm_cell_c_state_grad_outputs[0], 0);
     std::vector<size_t> temp_shape;
-    constexpr size_t kBasicLstmCStateGradOutput0DimNum = 3;
     if (basic_lstm_cell_c_state_grad_outputs_0_shape.size() == kBasicLstmCStateGradOutput0DimNum) {
       temp_shape = basic_lstm_cell_c_state_grad_outputs_0_shape;
     } else {
@@ -269,7 +276,8 @@ AnfNodePtr AddLSTMInputGradNode(const FuncGraphPtr &func_graph, const CNodePtr &
   auto lstm_gage_concat = func_graph->NewCNode(lstm_gage_concat_input);
   auto origin_input7_shape = AnfAlgo::GetOutputInferShape(origin_input7, 0);
   AnfAlgo::SetOutputInferTypeAndShape(
-    {kNumberTypeFloat16}, {{origin_input7_shape[kDim0], origin_input7_shape[kDim1], 4 * origin_input7_shape[kDim2]}},
+    {kNumberTypeFloat16},
+    {{origin_input7_shape[kDim0], origin_input7_shape[kDim1], kDimMultiNum * origin_input7_shape[kDim2]}},
     lstm_gage_concat.get());
   AnfAlgo::SetNodeAttr(kAttrN, MakeValue(SizeToLong(num_split_x)), lstm_gage_concat);
   AnfAlgo::SetNodeAttr(kAttrDynInputSizes, MakeValue(std::vector<int64_t>{SizeToLong(num_split_x)}), lstm_gage_concat);
@@ -298,7 +306,7 @@ AnfNodePtr CreateSplitV(const FuncGraphPtr &func_graph, const CNodePtr &dynamic_
   AnfAlgo::SetOutputInferTypeAndShape(dtypes, shapes, split_v.get());
   // Set attr
   AnfAlgo::SetNodeAttr(kAttrSplitDim, MakeValue(SizeToLong(0)), split_v);
-  AnfAlgo::SetNodeAttr(kAttrNumSplit, MakeValue(SizeToLong(2)), split_v);
+  AnfAlgo::SetNodeAttr(kAttrNumSplit, MakeValue(SizeToLong(kAttrNumSplitValue)), split_v);
   AnfAlgo::SetNodeAttr(kAttrSizeSplits, MakeValue(std::vector<int64_t>{SizeToLong(origin_input6_shape[0] - 1), 1}),
                        split_v);
   AnfAlgo::SetNodeAttr("is_backend_insert", MakeValue(true), split_v);
@@ -321,8 +329,7 @@ AnfNodePtr CreateHConcat(const FuncGraphPtr &func_graph, const CNodePtr &dynamic
   auto origin_input4_shape = AnfAlgo::GetOutputInferShape(origin_input4, 0);
   // Create reshape to change shape
   std::vector<size_t> shape_tmp;
-  constexpr size_t kInput4DimNum = 3;
-  if (origin_input4_shape.size() == kInput4DimNum) {
+  if (origin_input4_shape.size() == kShape4dDims) {
     shape_tmp = origin_input4_shape;
   } else {
     shape_tmp = {1, origin_input4_shape[0], origin_input4_shape[1]};
@@ -339,8 +346,8 @@ AnfNodePtr CreateHConcat(const FuncGraphPtr &func_graph, const CNodePtr &dynamic
   std::vector<size_t> shape = {splitv_output0_shape[0] + 1, origin_input4_shape[0], origin_input4_shape[1]};
   AnfAlgo::SetOutputInferTypeAndShape({AnfAlgo::GetOutputInferDataType(origin_input4, 0)}, {shape}, concat.get());
   // Set attr
-  AnfAlgo::SetNodeAttr(kAttrN, MakeValue(SizeToLong(2)), concat);
-  AnfAlgo::SetNodeAttr(kAttrDynInputSizes, MakeValue(std::vector<int64_t>{2}), concat);
+  AnfAlgo::SetNodeAttr(kAttrN, MakeValue(SizeToLong(kAttrNValue)), concat);
+  AnfAlgo::SetNodeAttr(kAttrDynInputSizes, MakeValue(std::vector<int64_t>{kAttrDynInputSizesValue}), concat);
   AnfAlgo::SetNodeAttr(kAttrAxis, MakeValue(SizeToLong(0)), concat);
   AnfAlgo::SetNodeAttr("is_backend_insert", MakeValue(true), concat);
   return concat;
@@ -362,9 +369,9 @@ AnfNodePtr CreateConcat(const FuncGraphPtr &func_graph, const CNodePtr &dynamic_
                                origin_output0_shape[kDim2] + h_concat_output_shape[kDim2]};
   AnfAlgo::SetOutputInferTypeAndShape({AnfAlgo::GetOutputInferDataType(origin_input0, 0)}, {shape}, concat.get());
   // Set attr
-  AnfAlgo::SetNodeAttr(kAttrN, MakeValue(SizeToLong(2)), concat);
-  AnfAlgo::SetNodeAttr(kAttrDynInputSizes, MakeValue(std::vector<int64_t>{2}), concat);
-  AnfAlgo::SetNodeAttr(kAttrAxis, MakeValue(SizeToLong(2)), concat);
+  AnfAlgo::SetNodeAttr(kAttrN, MakeValue(SizeToLong(kAttrNValue)), concat);
+  AnfAlgo::SetNodeAttr(kAttrDynInputSizes, MakeValue(std::vector<int64_t>{kAttrDynInputSizesValue}), concat);
+  AnfAlgo::SetNodeAttr(kAttrAxis, MakeValue(SizeToLong(kAttrAxis2Value)), concat);
   AnfAlgo::SetNodeAttr("is_backend_insert", MakeValue(true), concat);
   return concat;
 }
@@ -378,8 +385,7 @@ AnfNodePtr CreateConcatNodeT1(const FuncGraphPtr &func_graph, const CNodePtr &dy
   auto origin_input4_shape = AnfAlgo::GetOutputInferShape(origin_input4, 0);
   // Create reshape to change shape
   std::vector<size_t> shape_tmp;
-  constexpr size_t kShapeDimNum = 3;
-  if (origin_input4_shape.size() == kShapeDimNum) {
+  if (origin_input4_shape.size() == kShape3dDims) {
     shape_tmp = origin_input4_shape;
   } else {
     shape_tmp = {1, origin_input4_shape[0], origin_input4_shape[1]};
@@ -398,9 +404,9 @@ AnfNodePtr CreateConcatNodeT1(const FuncGraphPtr &func_graph, const CNodePtr &dy
                                origin_input0_shape[kDim2] + shape_tmp[kDim2]};
   AnfAlgo::SetOutputInferTypeAndShape({AnfAlgo::GetOutputInferDataType(origin_input0, 0)}, {shape}, concat.get());
   // Set attr
-  AnfAlgo::SetNodeAttr(kAttrN, MakeValue(SizeToLong(2)), concat);
-  AnfAlgo::SetNodeAttr(kAttrDynInputSizes, MakeValue(std::vector<int64_t>{2}), concat);
-  AnfAlgo::SetNodeAttr(kAttrAxis, MakeValue(SizeToLong(2)), concat);
+  AnfAlgo::SetNodeAttr(kAttrN, MakeValue(SizeToLong(kAttrNValue)), concat);
+  AnfAlgo::SetNodeAttr(kAttrDynInputSizes, MakeValue(std::vector<int64_t>{kAttrDynInputSizesValue}), concat);
+  AnfAlgo::SetNodeAttr(kAttrAxis, MakeValue(SizeToLong(kAttrAxis2Value)), concat);
   AnfAlgo::SetNodeAttr("is_backend_insert", MakeValue(true), concat);
   return concat;
 }
@@ -432,8 +438,8 @@ AnfNodePtr CreateBatchMatMul2(const FuncGraphPtr &func_graph, const AnfNodePtr &
                                          node, lstm_input_grad};
   auto batch_matmul = func_graph->NewCNode(matmul_inputs);
   // Set infer data type and shape
-  auto out_shape = {AnfAlgo::GetOutputInferShape(lstm_input_grad, 0)[0], IntToSize(1),
-                    AnfAlgo::GetOutputInferShape(lstm_input_grad, 0)[2]};
+  auto out_shape = {AnfAlgo::GetOutputInferShape(lstm_input_grad, 0)[kIndex0], IntToSize(1),
+                    AnfAlgo::GetOutputInferShape(lstm_input_grad, 0)[kIndex2]};
   AnfAlgo::SetOutputInferTypeAndShape({kNumberTypeFloat16}, {out_shape}, batch_matmul.get());
   // Set attr
   AnfAlgo::SetNodeAttr("is_backend_insert", MakeValue(true), batch_matmul);

View File

@@ -202,7 +202,11 @@ AnfNodePtr ConstructMultiplier(const FuncGraphPtr &func_graph, int64_t fn, int64
           auto vaild_w = GetInterSection(start_w, start_w + kw, pad_list[kDim4], pad_list[kDim4] + fw);
           auto vaild_data = vaild_d * vaild_h * vaild_w;
           auto vaild_kernel = v_kd * v_kh * v_kw;
-          float val = count_include_pad ? 1.0 / vaild_kernel : 1.0 / vaild_data;
+          auto valid_dividend = count_include_pad ? vaild_kernel : vaild_data;
+          if (valid_dividend == 0) {
+            MS_LOG(EXCEPTION) << "Dividend 'valid_dividend' should not be 0.";
+          }
+          float val = 1.0 / valid_dividend;
           *tensor_data = float16(val);
           ++tensor_data;
           start_w += sw;
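
Unlike the renaming hunks, this change alters behavior: the dividend is chosen first and checked for zero before the reciprocal is computed, instead of dividing blindly. A minimal sketch of the same guard in isolation, with std::runtime_error standing in for MS_LOG(EXCEPTION) and hypothetical parameter names:

#include <cstdint>
#include <stdexcept>

// Guard-before-divide, as in ConstructMultiplier: pick the dividend first,
// reject zero explicitly, then divide once.
float PoolingMultiplier(int64_t valid_kernel, int64_t valid_data, bool count_include_pad) {
  const int64_t valid_dividend = count_include_pad ? valid_kernel : valid_data;
  if (valid_dividend == 0) {
    // MS_LOG(EXCEPTION) raises in MindSpore; a plain exception models that here.
    throw std::runtime_error("Dividend 'valid_dividend' should not be 0.");
  }
  return 1.0f / static_cast<float>(valid_dividend);
}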

View File

@@ -120,7 +120,6 @@ void SetAttrForOutputNode(const FuncGraphManagerPtr &manager, const AnfNodePtr &
       auto output_idx = GetValue<int64_t>(GetValueNode(getitem_input2));
       if (output_idx == getitem_idx) {
         SetAttrForOutputNode(manager, output_node, groups);
-        return;
       }
     } else {
       SetAttrForOutputNode(manager, output_node, groups);
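
Dropping the early return is also a behavioral change: after handling a matching tuple-getitem, the traversal now continues to the remaining output nodes instead of stopping at the first match. A minimal sketch of that control-flow difference with hypothetical names (the real code recurses over graph users rather than looping over indices):

#include <vector>

// With an early return inside the match branch, at most one user is visited;
// without it, the loop falls through and every matching user is handled.
int VisitMatchingUsers(const std::vector<int> &user_indices, int target_idx) {
  int visited = 0;
  for (int idx : user_indices) {
    if (idx == target_idx) {
      ++visited;  // the removed `return;` would have ended the walk here
    }
  }
  return visited;
}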

View File

@@ -457,6 +457,7 @@ constexpr auto kValueTargetOther = "target_other";
 // some size
 const size_t kShape4dDims = 4;
 const size_t kShape3dDims = 3;
+const size_t kShape2dDims = 2;
 const size_t kShape5dDims = 5;
 const size_t kShape1dDims = 1;