forked from mindspore-Ecosystem/mindspore
!47778 Clean code warning.
Merge pull request !47778 from Margaret_wangrui/clean_code
Commit c32a8cd837
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020-2022 Huawei Technologies Co., Ltd
+ * Copyright 2020-2023 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -45,7 +45,7 @@ int64_t SplitTupleInputs(const FuncGraphPtr &graph, const AnfNodePtr &tuple_inpu
     MS_EXCEPTION_IF_NULL(dyn_input_node);
     // Handle tuple nested scenes.
     if (dyn_input_node->isa<CNode>() && common::AnfAlgo::CheckPrimitiveType(dyn_input_node, prim::kPrimMakeTuple)) {
-      input_size += SplitTupleInputs(graph, dyn_input_node, plant_inputs);
+      input_size += LongToSize(SplitTupleInputs(graph, dyn_input_node, plant_inputs));
       continue;
     }
     (void)plant_inputs->emplace_back(dyn_input_node);
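Note on the change above: SplitTupleInputs returns int64_t while the accumulator is unsigned, so the bare += mixes signed and unsigned arithmetic and draws sign-conversion warnings. A minimal standalone sketch of the idiom, assuming LongToSize behaves as a checked signed-to-unsigned converter (the helper body below is our assumption, not the repository's implementation):

#include <cstddef>
#include <cstdint>
#include <stdexcept>

// Assumed contract of a LongToSize-style helper: reject negative input
// instead of letting it wrap around to a huge size_t.
inline std::size_t LongToSize(int64_t v) {
  if (v < 0) {
    throw std::out_of_range("negative count cannot be converted to size_t");
  }
  return static_cast<std::size_t>(v);
}

int main() {
  std::size_t input_size = 0;
  int64_t nested = 3;  // stands in for a recursive SplitTupleInputs result
  input_size += LongToSize(nested);  // explicit, checked conversion; no lint
  return static_cast<int>(input_size);
}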
@@ -1,7 +1,7 @@
 /**
  * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
  *
- * Copyright 2019-2022 Huawei Technologies Co., Ltd
+ * Copyright 2019-2023 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -145,7 +145,7 @@ AnfNodePtr HyperMap::FullMake(const std::shared_ptr<List> &type, const FuncGraph
     MS_LOG(EXCEPTION) << "The lists in HyperMap should have the same length. " << oss.str();
   }

-  // cannot use shared_from_base() also known as this, as it will make a reference cycle on
+  // Cannot use shared_from_base() also known as this, as it will make a reference cycle on
   // hypermap and graph generated, it will cause memory leak.
   auto fn_rec = NewValueNode(std::make_shared<HyperMap>(*this));
   constexpr size_t kPrimHoldLen = 1;
@@ -205,7 +205,7 @@ AnfNodePtr HyperMap::FullMake(const std::shared_ptr<Tuple> &type, const FuncGrap
     MS_LOG(EXCEPTION) << "The length of tuples in HyperMap must be the same. " << oss.str();
   }

-  // cannot use shared_from_base() also known as this, as it will make a reference cycle on
+  // Cannot use shared_from_base() also known as this, as it will make a reference cycle on
   // hypermap and graph generated, it will cause memory leak.
   auto fn_rec = NewValueNode(std::make_shared<HyperMap>(*this));
   constexpr size_t kPrimHoldLen = 1;
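The comment capitalized in the two hunks above carries the real design constraint: the functor is copied into a fresh shared_ptr because storing a shared_from_this-style pointer inside a graph the functor itself owns would close a reference cycle and leak both objects. A self-contained sketch of that failure mode, with illustrative types (HyperMapLike and GraphNode are ours, not MindSpore's):

#include <memory>

struct HyperMapLike;

struct GraphNode {
  std::shared_ptr<HyperMapLike> payload;  // generated graph keeps its functor alive
};

struct HyperMapLike : std::enable_shared_from_this<HyperMapLike> {
  std::shared_ptr<GraphNode> generated;  // functor also owns the graph it built
};

int main() {
  auto hm = std::make_shared<HyperMapLike>();
  auto node = std::make_shared<GraphNode>();
  // node->payload = hm->shared_from_this();           // hm -> node -> hm: never freed
  node->payload = std::make_shared<HyperMapLike>(*hm);  // a copy breaks the cycle
  hm->generated = node;
  return 0;
}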
@@ -462,7 +462,7 @@ FuncGraphPtr PyExecuteGradient::GenerateFuncGraph(const AbstractBasePtrList &arg
   // Make fprop first result, PyExecute's forward result.
   AnfNodePtr out = fg->NewCNodeInOrder(params);

-  // make fprop second result, PyExecute's backward function.
+  // Make fprop second result, PyExecute's backward function.
   FuncGraphPtr bprop = std::make_shared<FuncGraph>();

   ss.str(std::string());
@@ -1252,7 +1252,7 @@ FuncGraphPtr VmapOperation::GenerateFuncGraph(const AbstractBasePtrList &args_sp
   vmap_fg->set_flag(FUNC_GRAPH_FLAG_CORE, true);
   vmap_fg->debug_info()->set_name(ss.str());

-  // add parameter for `fn`, `in_axes` and `out_axes` respectively.
+  // Add parameter for `fn`, `in_axes` and `out_axes` respectively.
   ParameterPtr param_graph = vmap_fg->add_parameter();
   (void)vmap_fg->add_parameter();
   (void)vmap_fg->add_parameter();
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020-2022 Huawei Technologies Co., Ltd
+ * Copyright 2020-2023 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -62,7 +62,7 @@ DFunctor::DFunctor(const FuncGraphPtr &primal_graph, const pipeline::ResourceBas
   const auto &info = primal_graph->GetEffectInfo();
   if (is_top_ && info.back_mem) {
     // Add Umonad arg for top graph.
-    tape_->add_parameter();
+    (void)tape_->add_parameter();
   }
 }

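This hunk applies the same clean-code pattern as the VmapOperation one above: when a return value is deliberately ignored, cast it to void so analyzers do not flag a discarded result. A minimal sketch with an illustrative Graph type (not the MindSpore FuncGraph API):

#include <memory>
#include <vector>

struct Parameter {};
using ParameterPtr = std::shared_ptr<Parameter>;

struct Graph {
  // Returns the new parameter so callers can wire it up; callers that only
  // want the side effect would otherwise trip unused-result checkers.
  ParameterPtr add_parameter() {
    params_.push_back(std::make_shared<Parameter>());
    return params_.back();
  }
  std::vector<ParameterPtr> params_;
};

int main() {
  Graph tape;
  (void)tape.add_parameter();  // the (void) documents the intentional discard
  return 0;
}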
@@ -1,7 +1,7 @@
 /**
  * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
  *
- * Copyright 2019-2022 Huawei Technologies Co., Ltd
+ * Copyright 2019-2023 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -226,7 +226,7 @@ class SimplifyDataStructuresRewriter : public BaseRewriter {
   }

   // DictGetItem --> PyExecute()
-  AnfNodePtr RebuidDictGetItem(const CNodePtr &node) const {
+  AnfNodePtr RebuildDictGetItem(const CNodePtr &node) const {
     MS_EXCEPTION_IF_NULL(node);
     // Inputs should be [dict_setitem, dict, item]
     const size_t expect_inputs_size = 3;
@@ -291,7 +291,7 @@ class SimplifyDataStructuresRewriter : public BaseRewriter {
   AnfNodePtr ConvertDictGetItem(const CNodePtr &node) {
     static const auto support_fallback_runtime = (common::GetEnv("MS_DEV_ENABLE_FALLBACK_RUNTIME") != "0");
     if (support_fallback_runtime && is_dict_output_) {
-      return RebuidDictGetItem(node);
+      return RebuildDictGetItem(node);
     }
     return ConvertDictGetItemToTupleGetItem(node);
   }
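A detail worth noting in the unchanged context here: the MS_DEV_ENABLE_FALLBACK_RUNTIME lookup is cached in a function-local static const, so the environment is read once on first use and the answer is reused for the rest of the process. A small sketch of that caching idiom (the GetEnvOrEmpty wrapper below is an assumption standing in for common::GetEnv):

#include <cstdlib>
#include <string>

// Assumed wrapper: returns an empty string when the variable is unset.
std::string GetEnvOrEmpty(const char *name) {
  const char *v = std::getenv(name);
  return v == nullptr ? std::string() : std::string(v);
}

bool FallbackRuntimeEnabled() {
  // Initialized exactly once, on the first call; flipping the variable
  // mid-process has no effect afterwards, by design.
  static const bool enabled = (GetEnvOrEmpty("MS_DEV_ENABLE_FALLBACK_RUNTIME") != "0");
  return enabled;
}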
@@ -86,7 +86,7 @@ void CastSameTypeEliminater::Visit(const AnfNodePtr &node) {
   }
 }

-bool TwoCastEliminater::CheckTwoTypes(const std::map<TypeId, int> &type_map, TypeId type1, TypeId type2) {
+bool TwoCastEliminater::CheckTwoTypes(const std::map<TypeId, int> &type_map, TypeId type1, TypeId type2) const {
   auto type1_iter = type_map.find(type1);
   auto type2_iter = type_map.find(type2);
   if (type1_iter != type_map.end() && type2_iter != type_map.end()) {
@@ -96,7 +96,7 @@ bool TwoCastEliminater::CheckTwoTypes(const std::map<TypeId, int> &type_map, Typ
 }

 bool TwoCastEliminater::CheckThreeTypes(const std::map<TypeId, int> &type_map, TypeId type1, TypeId type2,
-                                        TypeId type3) {
+                                        TypeId type3) const {
   auto type1_iter = type_map.find(type1);
   auto type2_iter = type_map.find(type2);
   auto type3_iter = type_map.find(type3);
@@ -107,8 +107,8 @@ bool TwoCastEliminater::CheckThreeTypes(const std::map<TypeId, int> &type_map, T
 }

 // {prim::kPrimCast, {prim::kPrimCast, X, Y}, T} -> {prim::kPrimCast, X, T}
-// x_type <= y_type <= t_type or x_type >= y_type >= t_type
-bool TwoCastEliminater::CheckTypesIsIncrementalOrDecreasing() {
+// y_type == t_type or x_type <= y_type or x_type >= y_type >= t_type
+bool TwoCastEliminater::CheckTypesIsIncreasingOrDecreasing() {
   auto x_type = x_->Type();
   if (x_type->isa<TensorType>()) {
     x_type = x_type->cast<TensorTypePtr>()->element();
@@ -128,18 +128,21 @@ bool TwoCastEliminater::CheckTypesIsIncrementalOrDecreasing() {
   auto x_type_id = x_type->type_id();
   auto y_type_id = y_type->type_id();
   auto t_type_id = t_type->type_id();
+  // y_type == t_type
   if (y_type_id == t_type_id) {
     return true;
   }
-  // If the precision is incremental or decreasing, the cast can be eliminated.
+  // If the precision is increasing or decreasing, the cast can be eliminated.
   // x_type <= y_type
-  bool incremental = CheckTwoTypes(int_map_, x_type_id, y_type_id) || CheckTwoTypes(uint_map_, x_type_id, y_type_id) ||
-                     CheckTwoTypes(float_map_, x_type_id, y_type_id);
+  bool increasing = CheckTwoTypes(int_map_, x_type_id, y_type_id) || CheckTwoTypes(uint_map_, x_type_id, y_type_id) ||
+                    CheckTwoTypes(float_map_, x_type_id, y_type_id);
+  if (increasing) {
+    return true;
+  }
   // x_type >= y_type >= t_type
-  bool decreasing = CheckThreeTypes(int_map_, t_type_id, y_type_id, x_type_id) ||
-                    CheckThreeTypes(uint_map_, t_type_id, y_type_id, x_type_id) ||
-                    CheckThreeTypes(float_map_, t_type_id, y_type_id, x_type_id);
-  return incremental || decreasing;
+  return CheckThreeTypes(int_map_, t_type_id, y_type_id, x_type_id) ||
+         CheckThreeTypes(uint_map_, t_type_id, y_type_id, x_type_id) ||
+         CheckThreeTypes(float_map_, t_type_id, y_type_id, x_type_id);
 }

 // {prim::kPrimCast, {prim::kPrimCast, X, Y}, T}
@@ -150,7 +153,7 @@ AnfNodePtr TwoCastEliminater::operator()(const OptimizerPtr &, const AnfNodePtr
   if (x_ == nullptr || t_ == nullptr || y_ == nullptr) {
     return nullptr;
   }
-  if (CheckTypesIsIncrementalOrDecreasing()) {
+  if (CheckTypesIsIncreasingOrDecreasing()) {
     auto cast_op = python_adapter::GetPyFn("mindspore.ops.operations", "Cast")();
     ValuePtr cast = parse::data_converter::PyDataToValue(cast_op);
     auto cnode = NewCNode({NewValueNode(cast), x_, t_}, node->func_graph());
@@ -51,9 +51,9 @@ class TwoCastEliminater : public AnfVisitor {
   }

  private:
-  bool CheckTypesIsIncrementalOrDecreasing();
-  bool CheckTwoTypes(const std::map<TypeId, int> &type_map, TypeId type1, TypeId type2);
-  bool CheckThreeTypes(const std::map<TypeId, int> &type_map, TypeId type1, TypeId type2, TypeId type3);
+  bool CheckTypesIsIncreasingOrDecreasing();
+  bool CheckTwoTypes(const std::map<TypeId, int> &type_map, TypeId type1, TypeId type2) const;
+  bool CheckThreeTypes(const std::map<TypeId, int> &type_map, TypeId type1, TypeId type2, TypeId type3) const;
   std::map<TypeId, int> int_map_ = {
       {kNumberTypeInt, 0}, {kNumberTypeInt8, 1}, {kNumberTypeInt16, 2}, {kNumberTypeInt32, 3}, {kNumberTypeInt64, 4}};
   std::map<TypeId, int> uint_map_ = {{kNumberTypeUInt, 0},
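Taken together, the TwoCastEliminater hunks keep the rewrite Cast(Cast(X, Y), T) -> Cast(X, T) valid in exactly three cases: y_type == t_type, widening first (x_type <= y_type), or a monotone narrowing chain (x_type >= y_type >= t_type). A standalone sketch of that decision, with an illustrative rank table standing in for the pass's int_map_/uint_map_/float_map_ members:

#include <map>
#include <string>

using TypeName = std::string;  // stand-in for MindSpore's TypeId

// Higher rank = wider type; one table per type family, as in the pass.
static const std::map<TypeName, int> kFloatRank = {
    {"float16", 0}, {"float32", 1}, {"float64", 2}};

static bool RankLessEqual(const std::map<TypeName, int> &m, const TypeName &a, const TypeName &b) {
  auto ia = m.find(a);
  auto ib = m.find(b);
  return ia != m.end() && ib != m.end() && ia->second <= ib->second;
}

// Cast(Cast(x, y), t) may become Cast(x, t) when no information is lost.
bool CanFoldInnerCast(const TypeName &x, const TypeName &y, const TypeName &t) {
  if (y == t) {
    return true;  // outer cast targets the inner cast's type: it is a no-op
  }
  if (RankLessEqual(kFloatRank, x, y)) {
    return true;  // widening first is lossless; Cast(x, t) yields the same value
  }
  // monotone narrowing: t <= y <= x, the intermediate step drops nothing extra
  return RankLessEqual(kFloatRank, t, y) && RankLessEqual(kFloatRank, y, x);
}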
@@ -1,5 +1,5 @@
 /**
- * Copyright 2022 Huawei Technologies Co., Ltd
+ * Copyright 2022-2023 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -174,7 +174,8 @@ bool GRUCpuKernelMod::Launch(const std::vector<kernel::AddressPtr> &inputs, cons
                  reinterpret_cast<float *>(inputs[kInputWeightIndex]->addr) + weight_size_ + weight_h_size_);
   } else {
     auto size = GetSize(bias_desc_);
-    if (memset_s(GetDataHandle(bias_memory_), size, 0, size)) {
+    auto ret = memset_s(GetDataHandle(bias_memory_), size, 0, size);
+    if (ret != EOK) {
       MS_LOG(EXCEPTION) << "Bias memset error";
     }
   }
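The GRU kernel fix separates the memset_s call from its check so the status code is compared against EOK explicitly instead of relying on non-zero truthiness. A minimal sketch of the checked-memset pattern; CheckedMemset below emulates the memset_s contract, since Annex K availability varies by toolchain:

#include <cstddef>
#include <cstring>
#include <stdexcept>

constexpr int kEok = 0;  // stand-in for EOK

// Emulates the memset_s contract: validate the destination and sizes,
// zero the buffer, and return a status code instead of a pointer.
int CheckedMemset(void *dest, std::size_t dest_size, int ch, std::size_t count) {
  if (dest == nullptr || count > dest_size) {
    return -1;
  }
  std::memset(dest, ch, count);
  return kEok;
}

void ZeroBias(float *bias, std::size_t size_bytes) {
  auto ret = CheckedMemset(bias, size_bytes, 0, size_bytes);
  if (ret != kEok) {  // compare against the success code, not bare truthiness
    throw std::runtime_error("Bias memset error");
  }
}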
@@ -1,5 +1,5 @@
 /**
- * Copyright 2022 Huawei Technologies Co., Ltd
+ * Copyright 2022-2023 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -23,7 +23,7 @@ namespace mindspore {
 namespace opt {
 class PrintValueType : public Pass {
  public:
-  explicit PrintValueType(const std::string &name) : Pass("print_value_type") {}
+  explicit PrintValueType(const std::string &) : Pass("print_value_type") {}
   ~PrintValueType() override = default;
   bool Run(const FuncGraphPtr &graph) override;
 };
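The constructor never reads its argument, so the parameter is left unnamed: the signature (and every call site) stays the same while the unused-parameter warning disappears. A tiny sketch of the idiom (PrintValueTypeLike is an illustrative name):

#include <string>

struct Pass {
  explicit Pass(const std::string &name) : name_(name) {}
  std::string name_;
};

struct PrintValueTypeLike : Pass {
  // The caller still passes a string; since it is ignored, the parameter
  // is unnamed: no unused-parameter warning, no API break.
  explicit PrintValueTypeLike(const std::string &) : Pass("print_value_type") {}
};

int main() {
  PrintValueTypeLike pass("anything");  // argument accepted and discarded
  (void)pass;
  return 0;
}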
@@ -1,5 +1,5 @@
 /**
- * Copyright 2022 Huawei Technologies Co., Ltd
+ * Copyright 2022-2023 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -71,6 +71,7 @@ class MS_CORE_API MapTensor final : public Tensor {
     key_shape_ = {abstract::Shape::kShapeDimAny};
     shape_ = {abstract::Shape::kShapeDimAny};
     (void)shape_.insert(shape_.cend(), value_shape.cbegin(), value_shape.cend());
+    size_ = shape_[0];
     ShapeVector key_shape = {abstract::Shape::kShapeDimAny};
     key_tensor_ = std::make_shared<Tensor>(key_dtype, key_shape);
     value_tensor_ = std::make_shared<Tensor>(value_dtype, shape_);
@@ -90,13 +91,13 @@ class MS_CORE_API MapTensor final : public Tensor {
   MapTensor(const TensorPtr &key_tensor, const TensorPtr &value_tensor, const TensorPtr &status_tensor,
             const ValuePtr &default_value, const ValuePtr &permit_filter_value = nullptr,
             const ValuePtr &evict_filter_value = nullptr)
-      : default_value_(default_value) {
-    key_dtype_ = key_tensor->data_type();
+      : key_dtype_(key_tensor->data_type()), default_value_(default_value) {
     data_type_ = value_tensor->data_type();
     shape_ = value_tensor->shape();
     key_shape_ = key_tensor->shape();
     value_shape_.clear();
     (void)value_shape_.insert(value_shape_.cend(), shape_.cbegin() + 1, shape_.cend());
+    size_ = shape_.size() != 0 ? shape_[0] : (abstract::Shape::kShapeDimAny);
     key_tensor_ = key_tensor;
     value_tensor_ = value_tensor;
     status_tensor_ = status_tensor;
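The MapTensor change hoists key_dtype_ into the member initializer list and guards size_ against an empty shape before indexing element 0. A compact sketch of both points (types simplified; this mirrors the shape of the change, not the real class):

#include <vector>

constexpr long long kShapeDimAny = -1;  // stand-in for abstract::Shape::kShapeDimAny

class MapTensorLike {
 public:
  MapTensorLike(int key_dtype, const std::vector<long long> &shape)
      // Initializer list: members get their final value at construction
      // instead of being default-constructed and then assigned in the body.
      : key_dtype_(key_dtype), shape_(shape) {
    // An empty shape is legal here, so shape_[0] must be guarded.
    size_ = shape_.size() != 0 ? shape_[0] : kShapeDimAny;
  }

 private:
  int key_dtype_;
  std::vector<long long> shape_;
  long long size_ = kShapeDimAny;
};

int main() {
  MapTensorLike t(0, {4, 8});
  (void)t;
  return 0;
}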