forked from mindspore-Ecosystem/mindspore
!25533 [MSLITE] CodeCheck: Pclint clean.
Merge pull request !25533 from wangshaocong/codex
Commit b78c163705
@@ -426,9 +426,11 @@ bool CheckOfflineParallelConfig(const std::string &file, ParallelSplitConfig *pa
   const char *colon = ":";
   for (const auto &device : device_rates) {
     std::vector<std::string> rate = lite::SplitStringToVector(device, *colon);
-    auto compute_rate = std::atoi(rate.back().c_str());
-    if (compute_rate == 0) {
-      MS_LOG(ERROR) << "The compute rate is invalid.";
+    int64_t compute_rate = 0;
+    try {
+      compute_rate = std::stoi(rate.back());
+    } catch (const std::exception &e) {
+      MS_LOG(ERROR) << "Get compute rate failed: " << e.what();
       return false;
     }
     parallel_split_config->parallel_compute_rates_.push_back(compute_rate);

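The atoi-to-stoi change is more than lint hygiene: std::atoi returns 0 for both the string "0" and for unparseable input, so a parse failure was indistinguishable from a legitimate zero rate, while std::stoi throws and the catch block reports it. A minimal standalone sketch of the difference (illustrative only, not MindSpore code):

#include <cstdlib>
#include <iostream>
#include <stdexcept>
#include <string>

int main() {
  std::cout << std::atoi("abc") << '\n';  // prints 0: failure is silent
  try {
    (void)std::stoi("abc");  // unparseable input throws instead
  } catch (const std::invalid_argument &e) {
    std::cout << "stoi failed: " << e.what() << '\n';
  }
  return 0;
}
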
@@ -68,11 +68,12 @@ int GetMatData(const cv::Mat &mat, void **data, size_t *size) {
   }
   (*size) = 0;
   for (int i = 0; i < mat.rows; ++i) {
-    (*size) += mat.cols * mat.elemSize();
+    (*size) += static_cast<size_t>(mat.cols) * mat.elemSize();
   }

   (*data) = new char[*size];
-  if (memcpy_s(*data, *size, mat_local.data, mat.rows * mat.cols * mat.channels() * sizeof(float)) != EOK) {
+  if (memcpy_s(*data, *size, mat_local.data,
+               static_cast<size_t>(mat.rows * mat.cols * mat.channels()) * sizeof(float)) != EOK) {
     MS_LOG(ERROR) << "memcpy failed.";
     return RET_ERROR;
   }

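Both casts make the int-to-size_t conversions explicit, which is what Pclint asks for. Note that the second cast wraps the already-multiplied int product, so the arithmetic itself still runs in int; casting the first operand instead would also move the multiplication into size_t, which matters once rows * cols * channels can exceed INT_MAX. A standalone sketch with hypothetical sizes (not MindSpore code):

#include <cstddef>
#include <iostream>

int main() {
  int rows = 70000, cols = 70000;  // rows * cols exceeds INT_MAX
  // static_cast<std::size_t>(rows * cols) would cast only after the int
  // multiplication has already overflowed (undefined behavior).
  std::size_t safe = static_cast<std::size_t>(rows) * static_cast<std::size_t>(cols);
  std::cout << safe << '\n';  // 4900000000
  return 0;
}
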
@@ -360,7 +360,7 @@ int ConstFoldPass::DoConstantFold(const FuncGraphPtr &func_graph, const CNodePtr
     return lite::RET_ERROR;
   }
   if (std::any_of(inputs_ptr.begin(), inputs_ptr.end(),
-                  [](TensorPtr input) { return input->data_type() == kObjectTypeTensorType; })) {
+                  [](const TensorPtr &input) { return input->data_type() == kObjectTypeTensorType; })) {
     MS_LOG(DEBUG) << "this op is control flow op, which is not supported now.";
     return lite::RET_OK;
   }

@@ -370,16 +370,16 @@ int ConstFoldPass::DoConstantFold(const FuncGraphPtr &func_graph, const CNodePtr
     return lite::RET_ERROR;
   }
   if (std::any_of(outputs_ptr.begin(), outputs_ptr.end(),
-                  [](TensorPtr output) { return output->data_type() == kObjectTypeTensorType; })) {
+                  [](const TensorPtr &output) { return output->data_type() == kObjectTypeTensorType; })) {
     MS_LOG(DEBUG) << "this op is control flow op, which is not supported now.";
     return lite::RET_OK;
   }
   std::vector<Tensor *> input_tensors;
   std::transform(inputs_ptr.begin(), inputs_ptr.end(), std::back_inserter(input_tensors),
-                 [](TensorPtr input) { return input.get(); });
+                 [](const TensorPtr &input) { return input.get(); });
   std::vector<Tensor *> output_tensors;
   std::transform(outputs_ptr.begin(), outputs_ptr.end(), std::back_inserter(output_tensors),
-                 [](TensorPtr output) { return output.get(); });
+                 [](const TensorPtr &output) { return output.get(); });
   if (CopyQuantParams(cnode, input_tensors, output_tensors) != lite::RET_OK) {
     MS_LOG(ERROR) << "copy quant params failed.";
     return lite::RET_ERROR;

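Taking the lambda parameter as const TensorPtr & instead of TensorPtr avoids copying a shared_ptr per element: each by-value copy bumps and later drops an atomic reference count, which Pclint flags as an avoidable copy of a non-trivial object. A standalone sketch with stand-in types (hypothetical, not the project's headers):

#include <algorithm>
#include <iterator>
#include <memory>
#include <vector>

struct Tensor {};                           // stand-in for lite::Tensor
using TensorPtr = std::shared_ptr<Tensor>;  // stand-in for the project's alias

int main() {
  std::vector<TensorPtr> inputs{std::make_shared<Tensor>()};
  std::vector<Tensor *> raw;
  // const reference: no shared_ptr copy, no atomic refcount traffic
  std::transform(inputs.begin(), inputs.end(), std::back_inserter(raw),
                 [](const TensorPtr &input) { return input.get(); });
  return raw.size() == 1 ? 0 : 1;
}
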
@@ -29,7 +29,7 @@ constexpr size_t kConvWeightIndex = 2;
 constexpr size_t kConvBiasIndex = 3;
 constexpr size_t kConvNoBiasLen = 3;
 constexpr size_t kConvWithBiasLen = 4;
-int GetOutChannels(const CNodePtr &conv_node) {
+int64_t GetOutChannels(const CNodePtr &conv_node) {
   MS_ASSERT(conv_node != nullptr);
   auto value_node = conv_node->input(0);
   MS_ASSERT(value_node != nullptr);

@@ -66,7 +66,7 @@ void GenerateNewWeightConv2D(float *dst_weight, const float *conv_weight, const
 }

 void GenerateNewWeightConv2DTranspose(float *dst_weight, const float *scale_weight,
-                                      const tensor::TensorPtr &weight_tensor, int group, int kernel_num) {
+                                      const tensor::TensorPtr &weight_tensor, int64_t group, int kernel_num) {
   MS_ASSERT(dst_weight != nullptr && scale_weight != nullptr && weight_tensor != nullptr);
   if (group <= 0 || kernel_num <= 0) {
     return;

@@ -74,10 +74,10 @@ void GenerateNewWeightConv2DTranspose(float *dst_weight, const float *scale_weig
   MS_ASSERT(weight_tensor->data_c() != nullptr);
   auto weight_data = reinterpret_cast<float *>(weight_tensor->data_c());
   auto cin_group = weight_tensor->shape()[0] / group;
-  int area_size = weight_tensor->shape()[kInputIndexTwo] * weight_tensor->shape()[kInputIndexTwo];
-  for (int k = 0; k < cin_group; ++k) {
-    for (int j = 0; j < area_size; j++) {
-      for (int i = 0; i < kernel_num; ++i) {
+  int64_t area_size = weight_tensor->shape()[kInputIndexTwo] * weight_tensor->shape()[kInputIndexTwo];
+  for (int64_t k = 0; k < cin_group; ++k) {
+    for (int64_t j = 0; j < area_size; j++) {
+      for (int64_t i = 0; i < kernel_num; ++i) {
         dst_weight[i + j * kernel_num + k * area_size * kernel_num] =
           weight_data[i + j * kernel_num + k * area_size * kernel_num] * scale_weight[i];
       }

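With group widened to int64_t, cin_group (computed from the int64_t shape vector) is int64_t as well, so the loop counters are widened to match; mixing int counters with int64_t bounds is exactly the signed/size mismatch Pclint reports, and it truncates for very large shapes. A small standalone illustration (not MindSpore code):

#include <cstdint>
#include <vector>

int main() {
  std::vector<int64_t> shape{2, 3, 4};
  const int64_t count = shape[0] * shape[1] * shape[2];
  int64_t sum = 0;
  // Counter type matches the bound's type, so no narrowing comparison.
  for (int64_t i = 0; i < count; ++i) {
    sum += i;
  }
  return sum == count * (count - 1) / 2 ? 0 : 1;
}
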
@@ -109,7 +109,7 @@ const AnfNodePtr ConvTransformFusion::Process(const FuncGraphPtr &func_graph, co
     return nullptr;
   }
   auto abstr = transform_node->abstract();
-  int kernel_nums = GetOutChannels(conv_node);
+  int kernel_nums = static_cast<int>(GetOutChannels(conv_node));
   if (kernel_nums <= 0) {
     MS_LOG(INFO) << "Unsupported conv node, " << conv_node->DebugString();
     return node;

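Since GetOutChannels now returns int64_t, the caller narrows explicitly rather than implicitly, and the existing kernel_nums <= 0 guard catches the degenerate cases. Where no such guard exists, a checked narrow is the safer pattern; a hypothetical helper, not part of this commit:

#include <cstdint>
#include <limits>

// Hypothetical checked-narrowing helper: returns -1, which callers that
// reject non-positive channel counts already treat as invalid.
int NarrowToInt(int64_t v) {
  if (v > std::numeric_limits<int>::max() || v < std::numeric_limits<int>::min()) {
    return -1;
  }
  return static_cast<int>(v);
}

int main() { return NarrowToInt(int64_t{1} << 40) == -1 ? 0 : 1; }
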
@@ -292,19 +292,22 @@ STATUS TfLstmCellFusion::PopulateBiasNode(const EquivPtr &body_equiv, const Para
     return RET_FAILED;
   }
   auto origin_tensor = std::dynamic_pointer_cast<tensor::Tensor>(old_bias_param->default_param());
+  MS_CHECK_TRUE_RET(origin_tensor != nullptr, RET_ERROR);
   if (origin_tensor->data_type() != kNumberTypeFloat32 && origin_tensor->data_type() != kNumberTypeFloat) {
     MS_LOG(DEBUG) << "origin_tensor is not float32 type";
     return RET_ERROR;
   }
   auto data_ptr = reinterpret_cast<float *>(origin_tensor->data_c());
+  MS_CHECK_TRUE_RET(data_ptr != nullptr, RET_ERROR);
   auto data_shape = origin_tensor->shape();
+  MS_CHECK_GE(hidden_size, 0, RET_ERROR);
   if (data_shape.size() != 1 || data_shape[0] != 4 * hidden_size) {
     MS_LOG(DEBUG) << "bias data shape illegal";
     return RET_ERROR;
   }

   std::vector<int64_t> shape{1, kBidirectionalGateNum * hidden_size};
-  auto tensor_data = std::make_unique<float[]>(hidden_size * 8);
+  auto tensor_data = std::make_unique<float[]>(static_cast<size_t>(hidden_size) * 8);
   MS_CHECK_TRUE_RET(tensor_data != nullptr, lite::RET_ERROR);
   auto forget_bias_node = utils::cast<AnfNodePtr>((*body_equiv)[forget_bias_]);
   if (forget_bias_node == nullptr) {

@@ -330,8 +333,9 @@ STATUS TfLstmCellFusion::PopulateBiasNode(const EquivPtr &body_equiv, const Para
     }
   }

-  auto tensor_info = lite::CreateTensorInfo(tensor_data.get(), hidden_size * kBidirectionalGateNum * sizeof(float),
-                                            shape, kNumberTypeFloat32);
+  auto tensor_info =
+    lite::CreateTensorInfo(tensor_data.get(), static_cast<size_t>(hidden_size) * kBidirectionalGateNum * sizeof(float),
+                           shape, kNumberTypeFloat32);
   if (tensor_info == nullptr) {
     MS_LOG(ERROR) << "create tensor info failed.";
     return RET_ERROR;

@@ -401,7 +405,7 @@ CNodePtr TfLstmCellFusion::CreateLSTMNode(const FuncGraphPtr &func_graph, const
     c_weight->set_abstract(weight->abstract()->Clone());
   }

-  if (SplitWeights(weight, i_weight, c_weight, hidden_shape.back()) != RET_OK) {
+  if (SplitWeights(weight, i_weight, c_weight, static_cast<int>(hidden_shape.back())) != RET_OK) {
     MS_LOG(DEBUG) << "split weight to i_weight and c_weight failed";
     return nullptr;
   }

@@ -414,7 +418,7 @@ CNodePtr TfLstmCellFusion::CreateLSTMNode(const FuncGraphPtr &func_graph, const
     bias_node->set_abstract(bias->abstract()->Clone());
   }

-  if (PopulateBiasNode(body_equiv, bias_node, bias, hidden_shape.back()) != RET_OK) {
+  if (PopulateBiasNode(body_equiv, bias_node, bias, static_cast<int>(hidden_shape.back())) != RET_OK) {
     MS_LOG(DEBUG) << "reorder bias failed";
     return nullptr;
   }

@@ -582,7 +582,7 @@ int ControlFlowPass::CreateIfPartialNode(const FuncGraphPtr &fg, const size_t &i
     // set after fg inputs to cond_partial_cnode inputs
     then_partial_cnode_inputs.push_back(item);
     auto new_parameter = then_fg->add_parameter();
-    MS_CHECK_TRUE_MSG(new_parameter != nullptr, lite::RET_NULL_PTR, "new_parameter is nullptr");
+    MS_CHECK_TRUE_MSG(new_parameter != nullptr, RET_FAILED, "new_parameter is nullptr");
     if (index == kIfThenIndex) {
       new_parameter->set_name(item->fullname_with_scope() + "_then_fg_parameter");
     } else {

@@ -719,7 +719,7 @@ int ControlFlowPass::ProcessIfOp(const FuncGraphPtr &fg, const std::set<AnfNodeP
   ValueNodePtr switch_anf_primitive = lite::GetSwitchAnfPrim();
   if (switch_anf_primitive == nullptr) {
     MS_LOG(ERROR) << "GetSwitchAnfPrim failed.";
-    return false;
+    return RET_FAILED;
   }

   // insert switch node

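ProcessIfOp returns int, so the old `return false;` silently converted to 0, which is the success value in MindSpore Lite's convention; the failure was reported as success. Returning the named status code is the fix. A standalone sketch of the failure mode (status values assumed for illustration):

#include <iostream>

constexpr int RET_OK = 0;       // assumed: success is 0, as in MindSpore Lite
constexpr int RET_FAILED = -1;  // assumed failure code

int Process(bool fail) {
  if (fail) {
    return false;  // bool -> int conversion yields 0, i.e. RET_OK
  }
  return RET_OK;
}

int main() {
  // Prints 1: the failing call is indistinguishable from success.
  std::cout << (Process(true) == RET_OK) << '\n';
  return 0;
}
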
@@ -118,10 +118,9 @@ void SetTransType(const std::set<CNodePtr> &cnodes, FormatTransNodeType *trans_t
   }
 }

-bool JudgeCanOptimizerForMultiOp(const FuncGraphPtr &func_graph, const std::set<CNodePtr> &in_nodes,
-                                 const std::set<CNodePtr> &out_nodes, const std::set<CNodePtr> &middle_nodes,
-                                 TransTypePair *trans_info) {
-  MS_ASSERT(func_graph != nullptr && trans_info != nullptr);
+bool JudgeCanOptimizerForMultiOp(const std::set<CNodePtr> &in_nodes, const std::set<CNodePtr> &out_nodes,
+                                 const std::set<CNodePtr> &middle_nodes, TransTypePair *trans_info) {
+  MS_ASSERT(trans_info != nullptr);
   SetTransType(in_nodes, &trans_info->pre_);
   if (trans_info->pre_ == kNONE) {
     return false;

@@ -140,7 +139,7 @@ bool JudgeCanOptimizerForMultiOp(const FuncGraphPtr &func_graph, const std::set<
     }
     auto middle_node_prim = GetValueNode<PrimitivePtr>(middle_cnode->input(0));
     MS_CHECK_TRUE_MSG(middle_node_prim != nullptr, false, "GetValueNode failed");
-    if (!transpose_strategy.CanChangeOpAxis(func_graph, middle_cnode)) {
+    if (!transpose_strategy.CanChangeOpAxis(middle_cnode)) {
       return false;
     }
   }

@@ -429,7 +428,7 @@ STATUS DecreaseTransposeAlgo::HandleGraphMultiNode(const FuncGraphPtr &func_grap
     }
   }
   TransTypePair trans_info;
-  if (!JudgeCanOptimizerForMultiOp(func_graph, in_nodes, out_nodes, middle_nodes, &trans_info)) {
+  if (!JudgeCanOptimizerForMultiOp(in_nodes, out_nodes, middle_nodes, &trans_info)) {
     return lite::RET_NO_CHANGE;
   }
   auto node_list = TopoSort(func_graph->get_return());

@@ -552,8 +551,8 @@ int DecreaseTransposeAlgo::ResetSubGraphInput() {
   return lite::RET_OK;
 }

-int DecreaseTransposeAlgo::SetSubGraphOutput(const CNodePtr &cnode, const FuncGraphPtr &sub_graph) {
-  MS_ASSERT(cnode != nullptr && sub_graph != nullptr);
+int DecreaseTransposeAlgo::SetSubGraphOutput(const FuncGraphPtr &sub_graph) {
+  MS_ASSERT(sub_graph != nullptr);
   auto return_node = sub_graph->get_return();
   MS_ASSERT(return_node != nullptr);
   auto origin_input = return_node->inputs();

@@ -628,7 +627,7 @@ bool DecreaseTransposeAlgo::DecreaseTransposeForSingleOp(const FuncGraphPtr &fun
       return false;
     }
     (void)DecreaseTransposeForSingleOp(sub_func_graph);
-    ret = SetSubGraphOutput(cnode, sub_func_graph);
+    ret = SetSubGraphOutput(sub_func_graph);
     if (ret != lite::RET_OK) {
       MS_LOG(ERROR) << "SetSubGraphOutput failed";
       return false;

@@ -644,7 +643,7 @@ bool DecreaseTransposeAlgo::DecreaseTransposeForSingleOp(const FuncGraphPtr &fun
       return false;
     }
     (void)DecreaseTransposeForSingleOp(sub_func_graph);
-    ret = SetSubGraphOutput(cnode, sub_func_graph);
+    ret = SetSubGraphOutput(sub_func_graph);
     if (ret != lite::RET_OK) {
       MS_LOG(ERROR) << "SetSubGraphOutput failed";
       return false;

@@ -53,7 +53,7 @@ class DecreaseTransposeAlgo : public Pass {
   STATUS DoPreInsert(const FuncGraphPtr &func_graph, const CNodePtr &cnode, FormatTransNodeType trans_type);
   int SetSubGraphInput(const CNodePtr &cnode, const FuncGraphPtr &sub_graph);
   int ResetSubGraphInput();
-  int SetSubGraphOutput(const CNodePtr &cnode, const FuncGraphPtr &sub_graph);
+  int SetSubGraphOutput(const FuncGraphPtr &sub_graph);
   int ModifyCNodeFormat(const CNodePtr &cnode, FormatTransNodeType pre_trans_type);
   FmkType fmk_type_{converter::kFmkTypeMs};
   bool train_flag_{false};

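This and the surrounding hunks remove parameters the function bodies never read (func_graph, cnode), updating declaration, definition, and every call site together; unreferenced parameters are one of the Pclint findings this commit targets. When a signature cannot change, say to keep an override or a callback type, C++17's [[maybe_unused]] is the usual alternative, sketched below (illustrative, not project code):

#include <iostream>

// The parameter is kept for interface compatibility but intentionally unused;
// the attribute documents that and silences unused-parameter diagnostics.
void Report([[maybe_unused]] int verbosity, const char *msg) { std::cout << msg << '\n'; }

int main() {
  Report(0, "ok");
  return 0;
}
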
@@ -117,7 +117,7 @@ bool GroupDepthwiseOpConvertPass::Run(const FuncGraphPtr &graph) {
       auto abstract = lite::CreateTensorAbstract(shape_vector, type_id);
       if (abstract == nullptr) {
         MS_LOG(ERROR) << "Create tensor abstarct failed";
-        return RET_ERROR;
+        return false;
       }
       weight_node->set_abstract(abstract);
     }

@@ -68,7 +68,8 @@ bool NodeInferShape::JudgeOpSupportInfer(const CNodePtr &cnode) {
   if (prim_t == nullptr) {
     return false;
   }
-  auto parameter_gen = lite::PopulateRegistry::GetInstance()->GetParameterCreator(prim_t->value.type, lite::SCHEMA_CUR);
+  auto parameter_gen =
+    lite::PopulateRegistry::GetInstance()->GetParameterCreator(static_cast<int>(prim_t->value.type), lite::SCHEMA_CUR);
   if (parameter_gen == nullptr) {
     prim_t.reset();
     return false;

@@ -108,14 +109,14 @@ STATUS NodeInferShape::InferShape(const CNodePtr &cnode) {
   }
   std::vector<lite::Tensor *> inputs;
   std::transform(inputs_ptr.begin(), inputs_ptr.end(), std::back_inserter(inputs),
-                 [](TensorPtr input) { return input.get(); });
+                 [](const TensorPtr &input) { return input.get(); });
   std::vector<lite::Tensor *> outputs;
   std::transform(outputs_ptr.begin(), outputs_ptr.end(), std::back_inserter(outputs),
-                 [](TensorPtr output) { return output.get(); });
+                 [](const TensorPtr &output) { return output.get(); });
   auto ret = KernelInferShape(inputs, outputs, prim, {}, lite::SCHEMA_CUR);
   if (ret == lite::RET_NOT_SUPPORT) {
-    auto parameter_gen =
-      lite::PopulateRegistry::GetInstance()->GetParameterCreator(prim->value_type(), lite::SCHEMA_CUR);
+    auto parameter_gen = lite::PopulateRegistry::GetInstance()->GetParameterCreator(
+      static_cast<int>(prim->value_type()), lite::SCHEMA_CUR);
     if (parameter_gen == nullptr) {
       MS_LOG(ERROR) << "PopulateParameter return nullptr, type: " << schema::EnumNamePrimitiveType(prim->value_type());
       fbb.Clear();

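GetParameterCreator takes an int, while prim_t->value.type and prim->value_type() are enum values; the implicit enum-to-int conversion is legal, but Pclint wants the intent spelled out, hence the static_cast<int>. A standalone sketch of the pattern (hypothetical enum and function, not the registry's real API):

#include <iostream>

enum PrimitiveType { kPrimitiveTypeAdd = 5 };  // hypothetical unscoped enum

void LookupCreator(int type, int schema_version) {
  std::cout << type + schema_version << '\n';
}

int main() {
  PrimitiveType t = kPrimitiveTypeAdd;
  // Explicit cast states the enum-to-int intent that lint wants documented.
  LookupCreator(static_cast<int>(t), 0);
  return 0;
}
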
@@ -279,8 +279,8 @@ STATUS SlicePreposePass::VerifySliceAttrs(const CNodePtr &slice_cnode, const int
     MS_LOG(DEBUG) << "Invalid slice axe attribute";
     return RET_ERROR;
   }
-  MS_CHECK_TRUE_MSG(begin.size() >= axes.size(), RET_ERROR, "begin size is wrong");
-  MS_CHECK_TRUE_MSG(size.size() >= axes.size(), RET_ERROR, "size.size() is wrong");
+  MS_CHECK_TRUE_MSG(begin.size() <= axes.size(), RET_ERROR, "begin size is wrong");
+  MS_CHECK_TRUE_MSG(size.size() <= axes.size(), RET_ERROR, "size.size() is wrong");
   for (size_t i = 0; i < axes.size(); ++i) {
     auto axe = axes[i];
     if (dim > -1 && axe >= dim) {

@@ -702,13 +702,11 @@ CNodePtr SlicePreposePass::CreateSlice2ForReshapePrepose(const FuncGraphPtr &gra
 }

 bool SlicePreposePass::PreposeWithAbnormalReshape(const FuncGraphPtr &graph, const CNodePtr &slice_cnode,
-                                                  const CNodePtr &reshape_cnode, const CNodePtr &matmul_cnode,
-                                                  const std::vector<int64_t> &shape_in,
+                                                  const CNodePtr &matmul_cnode, const std::vector<int64_t> &shape_in,
                                                   const std::vector<int64_t> &shape_out, const int64_t abnormal_axe_in,
                                                   const int64_t abnormal_index_out) {
   MS_ASSERT(graph != nullptr);
   MS_ASSERT(slice_cnode != nullptr);
-  MS_ASSERT(reshape_cnode != nullptr);
   auto manager = graph->manager();
   MS_CHECK_TRUE_MSG(manager != nullptr, false, "manager is nullptr");
   auto slice_node = GetSlice(slice_cnode);

@@ -936,6 +934,10 @@ bool SlicePreposePass::PreposeWithReshape(const FuncGraphPtr &graph, const CNode
   bool support_abnormal_mode = true;  // if first mismatch axe are sliced and no more other axes are sliced, abnormal
   int64_t abnormal_index_out = GetReshapeAbnormalIndexOut(slice_cnode, mapped_axe, shape_out, &shape_out_copy,
                                                           &is_normal_mode, &support_abnormal_mode);
+  if (abnormal_index_out == -1) {
+    MS_LOG(ERROR) << "GetReshapeAbnormalIndexOut failed.";
+    return false;
+  }
   if (is_normal_mode) {
     return PreposeWithNormalReshape(graph, slice_cnode, reshape_cnode, shape_in, shape_out_copy, mapped_axe);
   } else if (support_abnormal_mode) {

@@ -955,8 +957,8 @@ bool SlicePreposePass::PreposeWithReshape(const FuncGraphPtr &graph, const CNode
       MS_LOG(DEBUG) << "not matmul->reshape->slice pattern";
       return false;
     }
-    return PreposeWithAbnormalReshape(graph, slice_cnode, reshape_cnode, matmul_cnode, shape_in, shape_out,
-                                      abnormal_axe_in, abnormal_index_out);
+    return PreposeWithAbnormalReshape(graph, slice_cnode, matmul_cnode, shape_in, shape_out, abnormal_axe_in,
+                                      abnormal_index_out);
   }
   return false;
 }

@@ -1448,7 +1450,8 @@ bool SlicePreposePass::MergeParallelSlice(const FuncGraphPtr &graph, const NodeU
   }
   for (size_t i = 1; i < slices->size(); ++i) {
     auto slice = utils::cast<CNodePtr>(slices->at(i).first);
-    if (slice == nullptr || !CheckPrimitiveType(slice, prim::kPrimSliceFusion)) {
+    MS_ASSERT(slice != nullptr);
+    if (!CheckPrimitiveType(slice, prim::kPrimSliceFusion)) {
       MS_LOG(ERROR) << "current node is not Slice";
       return false;
     }

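One behavioral note on this hunk: MS_ASSERT, like standard assert, typically compiles away in release builds, so the runtime `slice == nullptr` branch becomes a debug-only check and the release binary relies on the cast never failing. The standard-assert analogue (a sketch, assuming NDEBUG-style semantics for MS_ASSERT):

#include <cassert>
#include <memory>

struct Node { bool is_slice = true; };

bool CheckNode(const std::shared_ptr<Node> &node) {
  assert(node != nullptr);  // vanishes under NDEBUG: no runtime guard remains
  return node->is_slice;    // a null node would be dereferenced in release
}

int main() { return CheckNode(std::make_shared<Node>()) ? 0 : 1; }
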
@@ -71,9 +71,9 @@ class SlicePreposePass : public Pass {
                                    const std::vector<int64_t> &new_shape1, int64_t abnormal_axe_in,
                                    int64_t count_sliced2, bool slice_at_front);
   static bool PreposeWithAbnormalReshape(const FuncGraphPtr &graph, const CNodePtr &slice_cnode,
-                                         const CNodePtr &reshape_cnode, const CNodePtr &matmul_cnode,
-                                         const std::vector<int64_t> &shape_in, const std::vector<int64_t> &shape_out,
-                                         int64_t abnormal_axe_in, int64_t abnormal_index_out);
+                                         const CNodePtr &matmul_cnode, const std::vector<int64_t> &shape_in,
+                                         const std::vector<int64_t> &shape_out, int64_t abnormal_axe_in,
+                                         int64_t abnormal_index_out);
   static bool GetArithmeticInputInfo(const CNodePtr &arithmetic_cnode, std::vector<AnfNodePtr> *inputs,
                                      std::vector<std::vector<int64_t>> *shapes, std::vector<bool> *is_default_params);

@@ -393,14 +393,14 @@ bool TransposeStrategy::CanFusionIfInsert(const FuncGraphPtr &func_graph, const
       in_nodes.push_back(cnode->input(i));
     }
   }
-  if (!IsInOutCanFuison(func_graph, in_nodes, &trans_count, &trans_info->pre_)) {
+  if (!IsInOutCanFuison(in_nodes, &trans_count, &trans_info->pre_)) {
     return false;
   }
   std::vector<AnfNodePtr> out_nodes;
   if (GetPostNodes(func_graph, cnode, &out_nodes) != lite::RET_OK) {
     return false;
   }
-  if (!IsInOutCanFuison(func_graph, out_nodes, &trans_count, &trans_info->post_)) {
+  if (!IsInOutCanFuison(out_nodes, &trans_count, &trans_info->post_)) {
     return false;
   }
   if (trans_info->pre_ == trans_info->post_) {

@@ -425,8 +425,8 @@ bool TransposeStrategy::CanFusionIfInsert(const FuncGraphPtr &func_graph, const
   return can_insert;
 }

-bool TransposeStrategy::CanChangeOpAxis(const FuncGraphPtr &func_graph, const CNodePtr &cnode) {
-  MS_ASSERT(func_graph != nullptr && cnode != nullptr);
+bool TransposeStrategy::CanChangeOpAxis(const CNodePtr &cnode) {
+  MS_ASSERT(cnode != nullptr);
   auto prim = GetValueNode<PrimitivePtr>(cnode->input(0));
   MS_CHECK_TRUE_MSG(prim != nullptr, false, "GetValueNode Failed");
   if (!IsDynamicFormatOp(prim->name())) {

@@ -506,9 +506,8 @@ STATUS TransposeStrategy::TransposeInsertDependOnShape(const FuncGraphPtr &func_
   return lite::RET_OK;
 }

-bool TransposeStrategy::IsInOutCanFuison(const FuncGraphPtr &func_graph, const std::vector<AnfNodePtr> &nodes,
-                                         size_t *trans_count, FormatTransNodeType *trans_type) {
-  MS_ASSERT(func_graph != nullptr);
+bool TransposeStrategy::IsInOutCanFuison(const std::vector<AnfNodePtr> &nodes, size_t *trans_count,
+                                         FormatTransNodeType *trans_type) {
   MS_ASSERT(trans_count != nullptr && trans_type != nullptr);
   for (auto &node : nodes) {
     if (CheckPrimitiveType(node, prim::kPrimTranspose)) {

@@ -42,14 +42,13 @@ class TransposeStrategy {
   bool CanFusionIfInsert(const FuncGraphPtr &func_graph, const CNodePtr &cnode, TransTypePair *trans_info,
                          TransTypePair *trans_insert_info);
   STATUS ChangeOpAxis(const FuncGraphPtr &func_graph, const CNodePtr &cnode, FormatTransNodeType trans_type);
-  bool CanChangeOpAxis(const FuncGraphPtr &func_graph, const CNodePtr &cnode);
+  bool CanChangeOpAxis(const CNodePtr &cnode);

  private:
   AnfNodePtr TransposeDependOnShape(const FuncGraphPtr &func_graph, const CNodePtr &cnode, const std::vector<int> &perm,
                                     bool before, size_t index);
   STATUS TransposeInsertDependOnShape(const FuncGraphPtr &func_graph, const CNodePtr &cnode, bool before, size_t index);
-  bool IsInOutCanFuison(const FuncGraphPtr &func_graph, const std::vector<AnfNodePtr> &nodes, size_t *trans_count,
-                        FormatTransNodeType *trans_type);
+  bool IsInOutCanFuison(const std::vector<AnfNodePtr> &nodes, size_t *trans_count, FormatTransNodeType *trans_type);
   void DecidePreAndPostTransType(TransTypePair *trans_info, TransTypePair *trans_insert_info) const;
   FmkType fmk_type_{converter::kFmkTypeMs};
   bool train_flag_{false};

@@ -368,7 +368,7 @@ int Conv2DInfo::InferReplaceOp() {
   size_t dev_num = strategy_.dev_num;
   if (split_mode_ == SplitCIN) {
     MS_LOG(DEBUG) << name_ << " : Split Cin, infer Forward op.";
-    replace_op_ = CreateReduceNode(cnode_, parallel_output_nodes_, kAxisCIn, dev_num);
+    replace_op_ = CreateReduceNode(cnode_, parallel_output_nodes_, dev_num);
   } else {
     int32_t concat_dim;
     if (split_mode_ == SplitN) {

@@ -110,11 +110,11 @@ void CreateSplitConstantTensors(const tensor::TensorPtr &constant_tensor, const
     [&](const tensor::TensorPtr &constant_tensor) { return (reinterpret_cast<char *>(constant_tensor->data_c())); });
   int64_t outer_total_dim = 1;
   for (int64_t i = 0; i < split_dim; i++) {
-    outer_total_dim *= constant_shape[i];
+    outer_total_dim *= static_cast<size_t>(constant_shape[i]);
   }
   int64_t inner_stride = 1;
   for (int64_t i = static_cast<int64_t>(constant_shape.size()) - 1; i > split_dim; i--) {
-    inner_stride *= constant_shape[i];
+    inner_stride *= static_cast<size_t>(constant_shape[i]);
   }
   auto constant_tensor_ptr = reinterpret_cast<char *>(constant_tensor->data_c());
   // init split_constant_tensor_data

@@ -159,7 +159,7 @@ AnfNodePtr MultiConvSplit::MultiConvNHSplit(const AnfNodePtr &node) {
   int res_conv_numbers = static_cast<int>(conv_nodes_.size() - 1);
   for (int32_t i = res_conv_numbers; i >= 0; i--) {
     std::vector<AnfNodePtr> outputs_node;
-    if (!SplitSingleConv(conv_nodes_[i], split_outputs, {}, {}, &outputs_node)) {
+    if (!SplitSingleConv(conv_nodes_[i], split_outputs, &outputs_node)) {
       MS_LOG(ERROR) << "SplitSingleConv failed";
       return nullptr;
     }

@@ -174,14 +174,13 @@ AnfNodePtr MultiConvSplit::MultiConvNHSplit(const AnfNodePtr &node) {
 }

 bool MultiConvSplit::SplitSingleConv(const AnfNodePtr &ori_node, const std::vector<AnfNodePtr> &inputs_node,
-                                     const std::vector<AnfNodePtr> &weight_nodes,
-                                     const std::vector<AnfNodePtr> &bias_nodes, std::vector<AnfNodePtr> *outputs_node) {
+                                     std::vector<AnfNodePtr> *outputs_node) {
   MS_ASSERT(ori_node != nullptr && outputs_node != nullptr);
   auto ori_conv_cnode = ori_node->cast<CNodePtr>();
   MS_ASSERT(ori_conv_cnode != nullptr);
   auto ori_attr = GetValueNode<std::shared_ptr<ops::Conv2DFusion>>(ori_conv_cnode->input(kAnfPrimitiveIndex));
   MS_ASSERT(ori_attr != nullptr);
-  for (int64_t output_conv_index = 0; output_conv_index < (split_info_.out_num); output_conv_index++) {
+  for (int output_conv_index = 0; output_conv_index < static_cast<int>(split_info_.out_num); output_conv_index++) {
     // Create Conv node attr
     auto conv_prim = CopyConvPrim(ori_attr);
     auto ori_node_name = ori_node->fullname_with_scope();

@@ -47,7 +47,6 @@ class MultiConvSplit : public MultiNodeSplit {
                        int output_conv_index, std::vector<AnfNodePtr> *outputs_node);

   virtual bool SplitSingleConv(const AnfNodePtr &ori_node, const std::vector<AnfNodePtr> &inputs_node,
-                               const std::vector<AnfNodePtr> &weight_node, const std::vector<AnfNodePtr> &bias_nodes,
                                std::vector<AnfNodePtr> *outputs_node);

  protected:

@@ -162,7 +162,7 @@ AnfNodePtr OperatorInfo::CreateConcateNode(const CNodePtr &orig_node, const std:
 }

 AnfNodePtr OperatorInfo::CreateReduceNode(const CNodePtr &orig_node, const std::vector<AnfNodePtr> &input_nodes,
-                                          int32_t reduce_dim, size_t input_nodes_num) {
+                                          size_t input_nodes_num) {
   MS_EXCEPTION_IF_NULL(orig_node);
   if (input_nodes.size() != input_nodes_num) {
     MS_LOG(ERROR) << name_ << " : Input nodes size of reduce is not equal to input nodes number.";

@@ -66,7 +66,7 @@ class OperatorInfo {

   AnfNodePtr CreateConcateNode(const CNodePtr &orig_node, const std::vector<AnfNodePtr> &input_nodes,
                                int32_t concat_dim, size_t input_nodes_num);
-  AnfNodePtr CreateReduceNode(const CNodePtr &orig_node, const std::vector<AnfNodePtr> &input_nodes, int32_t reduce_dim,
+  AnfNodePtr CreateReduceNode(const CNodePtr &orig_node, const std::vector<AnfNodePtr> &input_nodes,
                               size_t input_nodes_num);

   std::shared_ptr<abstract::AbstractTensor> CreateFakeAbstractTensor() const;