!29078 Clean code for compiler

Merge pull request !29078 from YuJianfeng/r1.6
commit 358b7124bc
i-robot 2022-02-08 03:31:44 +00:00, committed by Gitee
11 changed files with 46 additions and 48 deletions


@@ -281,7 +281,7 @@ class FuncGraphManager : public std::enable_shared_from_this<FuncGraphManager>,
                          public deprecated::api::FuncGraphManager {
  public:
   explicit FuncGraphManager(const std::vector<FuncGraphPtr> &roots, bool manage = true);
-  ~FuncGraphManager() {
+  ~FuncGraphManager() noexcept {
     if (is_manage_) {
       RemoveRoots();
     }
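C++11 destructors are already implicitly noexcept in almost all cases, so the annotation changes nothing at runtime here; it documents that the cleanup in RemoveRoots() must not throw and satisfies static analyzers. A minimal sketch of the same RAII idea (ScopedFlag is a hypothetical class, not MindSpore code):

    #include <cstdio>

    // A guard whose destructor performs cleanup. Marking it noexcept makes the
    // no-throw guarantee explicit: if cleanup ever threw, std::terminate would run.
    class ScopedFlag {
     public:
      explicit ScopedFlag(bool *flag) : flag_(flag) { *flag_ = true; }
      ~ScopedFlag() noexcept { *flag_ = false; }

     private:
      bool *flag_;
    };

    int main() {
      bool active = false;
      {
        ScopedFlag guard(&active);  // active == true inside the scope
      }                             // destructor resets it and must not throw
      return active ? 1 : 0;
    }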


@@ -99,7 +99,7 @@ std::string MetaTensor::ToString() const {

 std::string MetaTensor::DumpText() const {
   std::ostringstream oss;
-  oss << type_name() << "(" << SizeToInt(data_type_) << ")[";
+  oss << type_name() << "(" << static_cast<int>(data_type_) << ")[";
   for (size_t i = 0; i < shape_.size(); ++i) {
     oss << (i > 0 ? ", " : "") << shape_[i];
   }


@@ -174,12 +174,12 @@ class MS_CORE_API MetaTensor : public Value {
   int ElementsNum() const;

   std::size_t hash() const override {
-    std::size_t hash_value = std::hash<int>{}(SizeToInt(data_type_));
+    std::size_t hash_value = std::hash<int>{}(static_cast<int>(data_type_));
     hash_value = hash_combine(hash_value, std::hash<size_t>{}(shape_.size()));
     // hash all elements may costly, so only take at most 4 elements into account based on
     // some experiments.
     for (size_t i = 0; (i < shape_.size()) && (i < 4); ++i) {
       hash_value = hash_combine(hash_value, (std::hash<int>{}(LongToInt(shape_[i]))));
     }
     return hash_value;
   }
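This hash folds the dtype, the rank, and at most the first four dimensions into one value, trading a little discrimination for cheap hashing of large shapes. A self-contained sketch of the same scheme, assuming a boost-style combiner (HashCombine and ShapeHash are hypothetical names; MindSpore's hash_combine may differ in detail):

    #include <cstddef>
    #include <cstdint>
    #include <functional>
    #include <vector>

    // Boost-style combiner: mixes a new value into an existing seed.
    inline std::size_t HashCombine(std::size_t seed, std::size_t value) {
      return seed ^ (value + 0x9e3779b9 + (seed << 6) + (seed >> 2));
    }

    // Mirrors the hash() above: dtype, rank, then at most 4 leading dimensions.
    std::size_t ShapeHash(int data_type, const std::vector<int64_t> &shape) {
      std::size_t h = std::hash<int>{}(data_type);
      h = HashCombine(h, std::hash<std::size_t>{}(shape.size()));
      for (std::size_t i = 0; i < shape.size() && i < 4; ++i) {
        h = HashCombine(h, std::hash<int64_t>{}(shape[i]));
      }
      return h;
    }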


@@ -64,6 +64,7 @@ class PIsEqual {
 template <typename T = AnfNodePtr>
 class PatternNode : public PBase<PatternNode<T> > {
  public:
+  virtual ~PatternNode() = default;
   T GetNode(const AnfNodePtr &) const {
     if (!captured_) {
       MS_EXCEPTION(ValueError) << "A Pattern wasn't captured for this Token before the call to GetNode.";
@@ -169,7 +170,7 @@ struct apply_func_tuple_item {
 template <size_t Index, typename Func>
 struct apply_func_tuple_item<true, Index, Func> {
   template <typename TTuple>
-  static void apply(Func *func, const TTuple &tuple) {}
+  static void apply(Func *, const TTuple &) {}
 };

 template <typename Func, typename TTuple>
@@ -179,7 +180,7 @@ inline void apply_func_tuple(Func *func, const TTuple &tuple) {

 struct PTupleResetCapture {
   template <typename T>
-  void operator()(size_t i, const T &pattern) const {
+  void operator()(size_t, const T &pattern) const {
     pattern.Reset();
   }
 };
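These two hunks simply drop the names of parameters that the function body never reads. An unnamed parameter keeps the signature intact while silencing -Wunused-parameter, with no need for (void)x casts or [[maybe_unused]]. A minimal illustration (hypothetical functions, not MindSpore code):

    // The active overload uses its arguments; the no-op one omits the names.
    template <typename T>
    void Visit(T *target, int value) { *target = value; }

    template <typename T>
    void VisitNoOp(T *, int) {}  // nothing is named, so nothing is "unused"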
@@ -221,7 +222,7 @@ template <typename... TArgs>
 class PCNode : public PBase<PCNode<TArgs...> > {
  public:
   explicit PCNode(const TArgs &... args) : args_(args...) {}
-  ~PCNode() = default;
+  virtual ~PCNode() = default;

   AnfNodePtr GetNode(const AnfNodePtr &node) const {
     tuple_utils::PTupleGetNode get_node(node);
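PatternNode above, PCNode here, and PPrimitive and PConstant in the hunks below all gain virtual destructors. That matters whenever an object might be destroyed through a base-class pointer: with a non-virtual destructor that is undefined behavior. A minimal illustration with hypothetical types:

    #include <memory>

    struct Base {
      virtual ~Base() = default;  // without virtual, delete via Base* is UB
      virtual int Eval() const { return 0; }
    };

    struct Derived : Base {
      int Eval() const override { return 42; }
    };

    int main() {
      std::unique_ptr<Base> p = std::make_unique<Derived>();  // deleted as Base*
      return p->Eval() == 42 ? 0 : 1;  // Derived's destructor still runs correctly
    }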
@@ -260,13 +261,13 @@ class PCNode : public PBase<PCNode<TArgs...> > {
     // Pattern may accept extra (non specified) nodes at the end of the CNode
     // There must be at least `min_extra_nodes` additional nodes in the inputs.
     if (inputs.size() >= pattern_arg_len + min_extra_nodes_) {
-      AnfNodePtrList tokens(inputs.begin(), inputs.begin() + pattern_arg_len);
+      AnfNodePtrList tokens(inputs.begin(), inputs.begin() + SizeToLong(pattern_arg_len));
       tuple_utils::PTupleCapture capture_func(tokens);
       tuple_utils::apply_func_tuple(&capture_func, args_);
       // If it could capture the initial set of nodes specified in the Pattern
       // and there are enough extra inputs to add
       if (capture_func.captured_ && inputs.size() > pattern_arg_len) {
-        extra_nodes_.insert(extra_nodes_.end(), inputs.begin() + pattern_arg_len, inputs.end());
+        extra_nodes_.insert(extra_nodes_.end(), inputs.begin() + SizeToLong(pattern_arg_len), inputs.end());
         return true;
       }
       return capture_func.captured_;
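`inputs.begin() + pattern_arg_len` adds an unsigned size_t to an iterator whose difference_type is signed (assuming AnfNodePtrList is a std::vector-like container), which sign-conversion checkers flag; SizeToLong makes the conversion explicit. A plausible sketch of such a helper (the real one lives in MindSpore's utilities and may report errors differently):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <limits>

    // Checked size_t -> int64_t conversion; asserts instead of silently wrapping.
    inline int64_t SizeToLongSketch(std::size_t v) {
      assert(v <= static_cast<std::size_t>(std::numeric_limits<int64_t>::max()));
      return static_cast<int64_t>(v);
    }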
@@ -304,7 +305,7 @@ template <typename... TArgs>
 class PPrimitive : public PBase<PPrimitive<TArgs...> > {
  public:
   explicit PPrimitive(const PrimitivePtr &prim, const TArgs &... args) : prim_(prim), args_(args...) {}
-  ~PPrimitive() = default;
+  virtual ~PPrimitive() = default;

   AnfNodePtr GetNode(const AnfNodePtr &node) const {
     tuple_utils::PTupleGetNode get_node(node);
@@ -348,7 +349,7 @@ class PPrimitive : public PBase<PPrimitive<TArgs...> > {
     // Pattern may accept extra (non specified) nodes at the end of the Primitive
     // There must be at least `min_extra_nodes` additional nodes in the inputs.
     if ((inputs.size() - 1) >= pattern_arg_len + min_extra_nodes_) {
-      AnfNodePtrList tokens(inputs.begin() + 1, inputs.begin() + 1 + pattern_arg_len);
+      AnfNodePtrList tokens(inputs.begin() + 1, inputs.begin() + 1 + SizeToLong(pattern_arg_len));
       tuple_utils::PTupleCapture capture_func(tokens);
       tuple_utils::apply_func_tuple(&capture_func, args_);
       // If it could capture the initial set of nodes specified in the Pattern
@@ -356,7 +357,7 @@ class PPrimitive : public PBase<PPrimitive<TArgs...> > {
       if (capture_func.captured_) {
         captured_prim_node_ = node;
         if (inputs.size() > pattern_arg_len + 1) {
-          extra_nodes_.insert(extra_nodes_.end(), inputs.begin() + 1 + pattern_arg_len, inputs.end());
+          extra_nodes_.insert(extra_nodes_.end(), inputs.begin() + 1 + SizeToLong(pattern_arg_len), inputs.end());
         }
       }
       return capture_func.captured_;
@@ -431,7 +432,7 @@ class PConstant : public PBase<PConstant<T> > {
         check_value_(check_value),
         is_scalar_(is_scalar) {}
-  ~PConstant() = default;
+  virtual ~PConstant() = default;

   // Sets as_node_ as the node received as argument to produce a same-shape node with GetNode
   const PConstant<T> &WithShapeAs(const AnfNodePtr &node) const {
     if (node == nullptr) {
@@ -479,7 +480,7 @@ class PConstant : public PBase<PConstant<T> > {
     return *this;
   }

-  AnfNodePtr GetNode(const AnfNodePtr &node) const {
+  AnfNodePtr GetNode(const AnfNodePtr &) const {
     // If a NewValueNode was requested (using NewValue function) then return that created node.
     if (is_new_value_node_) {
       return captured_node_;
@@ -578,7 +579,7 @@ class PConstant : public PBase<PConstant<T> > {
     return IsTensorConstant(value);
   }

-  void *GetPointerToTensorData(const AnfNodePtr &node, bool writable = false) const {
+  void *GetPointerToTensorData(const AnfNodePtr &node) const {
     if (!node->isa<ValueNode>()) {
       return nullptr;
     }
@@ -645,7 +646,7 @@ class PConstant : public PBase<PConstant<T> > {
     auto tensor_type_byte = GetTypeByte(tensor_type_ptr);
     char *data = reinterpret_cast<char *>(new_tensor_ptr->data_c());
     for (int i = 0; i < new_tensor_ptr->ElementsNum(); i++) {
-      ret = memcpy_s(data + i * tensor_type_byte, tensor_type_byte, source_data, tensor_type_byte);
+      ret = memcpy_s(data + IntToSize(i) * tensor_type_byte, tensor_type_byte, source_data, tensor_type_byte);
       if (ret != 0) {
         MS_LOG(INFO) << "memcpy_s error, error no " << ret << ", source size " << tensor_type_byte << ", dest size "
                      << tensor_type_byte;
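Here `i` is an int while tensor_type_byte is unsigned, so `i * tensor_type_byte` mixes signedness in the pointer-offset arithmetic; converting the index first keeps the whole expression unsigned. A hedged stand-in for the helper (IntToSize is MindSpore's; this sketch merely clamps negatives, whereas the real one may raise an error):

    #include <cstddef>

    // Stand-in for IntToSize: a guarded int -> size_t conversion.
    inline std::size_t IntToSizeSketch(int v) {
      return v < 0 ? 0 : static_cast<std::size_t>(v);
    }

    // Offset computed entirely in size_t, as in the loop above.
    char *ElementAt(char *base, int index, std::size_t elem_size) {
      return base + IntToSizeSketch(index) * elem_size;
    }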


@@ -51,7 +51,7 @@ class PrimalAttrGuard {
   explicit PrimalAttrGuard(const mindspore::HashMap<std::string, ValuePtr> &primal_attrs) {
     PrimalAttrManager::GetInstance().SetPrimalAttr(primal_attrs);
   }
-  ~PrimalAttrGuard() { PrimalAttrManager::GetInstance().ClearPrimalAttr(); }
+  ~PrimalAttrGuard() noexcept { PrimalAttrManager::GetInstance().ClearPrimalAttr(); }
 };
 }  // namespace mindspore
 #endif  // MINDSPORE_CORE_IR_PRIMAL_ATTR_H_


@@ -53,7 +53,7 @@ class PrimalDebugInfoGuard {
   explicit PrimalDebugInfoGuard(const std::vector<NodeDebugInfoPtr> &primal_debug_infos) {
     PrimalDebugInfoManager::GetInstance().SetPrimalDebugInfo(primal_debug_infos);
   }
-  ~PrimalDebugInfoGuard() { PrimalDebugInfoManager::GetInstance().ClearPrimalDebugInfo(); }
+  ~PrimalDebugInfoGuard() noexcept { PrimalDebugInfoManager::GetInstance().ClearPrimalDebugInfo(); }
 };
 }  // namespace mindspore
 #endif  // MINDSPORE_CORE_IR_PRIMAL_DEBUG_INFO_H


@@ -31,7 +31,7 @@ namespace mindspore {
 // Supported meta type
 enum PrimType {
   kPrimTypeUnknown = 0,
-  kPrimTypeBegin = kTypeUnknown,
+  kPrimTypeBegin = kPrimTypeUnknown,
   kPrimTypeBuiltIn,     // Built-in primitive operator
   kPrimTypePyInfer,     // Primitive operator with python infer function
   kPrimTypeUserCustom,  // Primitive operator defined by custom
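This one is a genuine bug fix rather than a signature cleanup: kPrimTypeBegin was initialized from kTypeUnknown, a constant that belongs to a different enumeration (MindSpore's TypeId), not to PrimType. Both constants happen to be 0, so behavior does not change, but the fix keeps the enum self-referential. A condensed illustration:

    // Unscoped enum constants leak into the enclosing scope, so the old
    // initializer compiled even though it reached into an unrelated enum.
    enum TypeId { kTypeUnknown = 0 /* ... */ };
    enum PrimType {
      kPrimTypeUnknown = 0,
      kPrimTypeBegin = kPrimTypeUnknown,  // was kTypeUnknown: same value, wrong enum
      kPrimTypeBuiltIn,
    };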
@@ -102,7 +102,7 @@ class MS_CORE_API Primitive : public Named {
   ///
   /// \param[in] args The arguments of primitive need to compute.
   /// \return The primitive's calculation result.
-  virtual BaseRef RunComputeFunction(const VectorRef &args) const { return nullptr; }
+  virtual BaseRef RunComputeFunction(const VectorRef &) const { return nullptr; }
   /// \brief Get Primitive's attribute.
   ///
   /// \param[in] attrName Primitive attribute name.
@@ -213,7 +213,7 @@ class MS_CORE_API Primitive : public Named {
   /// \brief Get const input index of the primitive.
   ///
   /// \return Const input indexes of the primitive.
-  const std::vector<size_t> &get_const_input_indexes() { return const_input_indexes_; }
+  const std::vector<size_t> &get_const_input_indexes() const { return const_input_indexes_; }
   /// \brief Get Primitive's id.
   ///
   /// \return primitive's Id.


@@ -409,7 +409,7 @@ class TensorDataImpl : public TensorData {
     ss << ']';
   }

-  std::string ProcessPlaceholder(std::ostringstream &ss, int max_width) const {
+  std::string ProcessPlaceholder(const std::ostringstream &ss, int max_width) const {
     std::string str = ss.str();
     if constexpr (std::is_same<T, bool>::value || std::is_same<T, float16>::value || std::is_same<T, float>::value ||
                   std::is_same<T, double>::value) {


@@ -109,6 +109,8 @@ using TensorDataPtr = std::shared_ptr<TensorData>;

 class WaitEvent : public ExceptionListener {
  public:
+  ~WaitEvent() = default;
+
   void OnException() override { set_need_wait(false); }

   void Wait() const {
@@ -399,7 +401,7 @@ class MS_CORE_API Tensor final : public MetaTensor {
   /// \brief Set the cast dtype of this Tensor.
   ///
   /// \param[in] dtype The input cast dtype.
-  void set_cast_dtype(TypePtr dtype = nullptr) { cast_dtype_ = dtype; }
+  void set_cast_dtype(const TypePtr &dtype = nullptr) { cast_dtype_ = dtype; }

   /// \brief Used cache_enable to update the tensor from the cache to the host.
   ///
@@ -419,7 +421,7 @@ class MS_CORE_API Tensor final : public MetaTensor {
   /// \brief Set the pointer of hashmap tensor.
   ///
   /// \param[in] hashmap_tensor_ptr The input pointer of hashmap tensor.
-  void set_hashmap_tensor_ptr(std::shared_ptr<Tensor> hashmap_tensor_ptr = nullptr) {
+  void set_hashmap_tensor_ptr(const std::shared_ptr<Tensor> &hashmap_tensor_ptr = nullptr) {
     hashmap_tensor_ptr_ = hashmap_tensor_ptr;
   }
@@ -431,7 +433,7 @@ class MS_CORE_API Tensor final : public MetaTensor {
   /// \brief Set the pointer of cache tensor.
   ///
   /// \param[in] cache_tensor_ptr The input pointer of cache tensor.
-  void set_cache_tensor_ptr(std::shared_ptr<Tensor> cache_tensor_ptr = nullptr) {
+  void set_cache_tensor_ptr(const std::shared_ptr<Tensor> &cache_tensor_ptr = nullptr) {
     cache_tensor_ptr_ = cache_tensor_ptr;
   }
@@ -488,7 +490,7 @@ class MS_CORE_API Tensor final : public MetaTensor {
   /// \brief Set synchronization status.
   ///
   /// \param[in] sync_status The input synchronization status.
-  void set_sync_status(TensorSyncStatus sync_status) { sync_status_ = sync_status; }
+  void set_sync_status(TensorSyncStatus sync_status) const { sync_status_ = sync_status; }

   /// \brief Get synchronization status.
   ///
@@ -513,7 +515,7 @@ class MS_CORE_API Tensor final : public MetaTensor {
   /// \brief Check if this Tensor is the output of graph.
   ///
   /// \return Whether this Tensor is the output of graph
-  bool IsGraphOutput() { return graph_output_; }
+  bool IsGraphOutput() const { return graph_output_; }

   /// \brief Set whether this Tensor is the output of graph.
   void SetIsGraphOutput() { graph_output_ = true; }
@@ -521,7 +523,7 @@ class MS_CORE_API Tensor final : public MetaTensor {
   /// \brief Get whether this Tensor is updated by the device.
   ///
   /// \return Whether this Tensor is updated by the device.
-  bool IsUpdatedByDevice() { return updated_by_device_; }
+  bool IsUpdatedByDevice() const { return updated_by_device_; }

   /// \brief Set whether this Tensor is updated by the device.
   void SetIsUpdateByDevice() { updated_by_device_ = true; }
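The getters gain const so they can be called on const Tensor references. The more unusual change is set_sync_status above, which becomes a const member function that still assigns sync_status_; that compiles only if the field is (presumably) declared mutable, the idiom for bookkeeping that is not part of an object's logical state. A minimal illustration with a hypothetical class:

    class Counter {
     public:
      int value() const {
        ++reads_;        // legal in a const method because reads_ is mutable
        return value_;
      }

     private:
      int value_ = 0;
      mutable int reads_ = 0;  // bookkeeping, not logical state
    };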


@@ -40,9 +40,6 @@ namespace mindspore {
 std::map<std::string, tensor::TensorPtr> MSANFModelParser::load_tensor_map_;

 namespace {
 static constexpr char kConstantValueNode[] = "Constant";
-static constexpr char kCNodeShapeAttr[] = "shape";
-static constexpr char kCNodeShape1Attr[] = "shape1";
-static constexpr char kCNodeShape2Attr[] = "shape2";
 static constexpr char kDoSignaturePrimitivePrefix[] = "S-Prim-";
 static constexpr char kHyperMapPrefix[] = "hyper_map";
@@ -81,7 +78,7 @@ template <typename T, typename P>
 std::shared_ptr<T> ParserAttr(const std::string &str, const mindspore::HashMap<string, P> &kv) {
   std::stack<std::string> rules;
   std::stack<P> value;
-  int count = 0;
+  size_t count = 0;
   for (size_t i = 0; i < str.length(); i++) {
     if (str[i] == '[') {
       rules.push(std::string("["));
@@ -110,7 +107,7 @@ std::shared_ptr<T> ParserAttr(const std::string &str, const mindspore::HashMap<s
     } else {
       count++;
       if (str[i + 1] == '[' || str[i + 1] == ']' || str[i + 1] == ',') {
-        auto value_name = str.substr(static_cast<int>(i) - count + 1, count);
+        auto value_name = str.substr((i - count) + 1, count);
         if (kv.find(value_name) == kv.end()) {
           MS_LOG(ERROR) << "Node's attributes and shape do not match.";
           return nullptr;
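With count now a size_t, the substring start is computed as (i - count) + 1 entirely in unsigned arithmetic. That is safe even in the edge case count == i + 1: unsigned subtraction wraps modulo 2^N, and the subsequent + 1 wraps the value back to the correct 0. A worked check:

    #include <cstdio>

    int main() {
      size_t i = 3, count = 4;         // worst case: the token starts at index 0
      size_t start = (i - count) + 1;  // i - count wraps to SIZE_MAX, +1 -> 0
      std::printf("%zu\n", start);     // prints 0, the correct start index
      return 0;
    }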
@@ -128,7 +125,7 @@ template <typename T>
 std::shared_ptr<T> ParserScalarAttrValue(const std::string &attr_name, const mindspore::HashMap<string, ValuePtr> &kv) {
   std::string str = attr_name;
   auto replace = [&](const string &orgStr, const string &newStr) {
-    std::string::size_type pos(0);
+    std::string::size_type pos;
     while ((pos = str.find(orgStr)) != std::string::npos) {
       str.replace(pos, orgStr.length(), newStr);
     }
@@ -148,7 +145,7 @@ std::shared_ptr<abstract::AbstractTuple> ParserAttrShape(
     const std::string &attr_name, const mindspore::HashMap<string, abstract::AbstractBasePtr> &kv) {
   std::string str = attr_name;
   auto replace = [&](const string &orgStr, const string &newStr) {
-    std::string::size_type pos(0);
+    std::string::size_type pos;
     while ((pos = str.find(orgStr)) != std::string::npos) {
       str.replace(pos, orgStr.length(), newStr);
     }
@@ -167,7 +164,7 @@ std::shared_ptr<abstract::AbstractTuple> ParserAttrShape(

 std::string ParseParameterName(const string &name) {
   string delimiter = ":";
-  size_t pos(0);
+  size_t pos;
   if ((pos = name.find(delimiter)) != string::npos) {
     return name.substr(pos + 1, string::npos - (pos + 1));
   }
@@ -561,7 +558,7 @@ bool MSANFModelParser::ObtainCNodeAttrInTypeForm(const PrimitivePtr &prim, const
 }

 ValuePtr MSANFModelParser::ParseAttrInScalarForm(const mind_ir::AttributeProto &attr_proto, int index) {
-  const int attr_type = attr_proto.type();
+  const int attr_type = static_cast<int>(attr_proto.type());
   switch (attr_type) {
     case mind_ir::AttributeProto_AttributeType_STRING: {
       return ParseAttrInScalar_string_string(attr_proto, index);
@@ -631,7 +628,7 @@ void MSANFModelParser::ObtainCNodeAttrInScalarForm(const mind_ir::AttributeProto
 }

 ValuePtr MSANFModelParser::ObtainCNodeAttrInSingleScalarForm(const mind_ir::AttributeProto &attr_proto) {
-  const int attr_type = attr_proto.type();
+  const int attr_type = static_cast<int>(attr_proto.type());
   switch (attr_type) {
     case mind_ir::AttributeProto_AttributeType_STRING: {
       return ParseAttrInSingleScalar_string_string(attr_proto);
@@ -707,8 +704,7 @@ bool MSANFModelParser::GetAttrValueForCNode(const PrimitivePtr &prim, const mind
       break;
     }
     case FORM_PARSE_SCALAR: {
-      std::size_t value_pos(0);
-      if ((value_pos = ref_attr_name.find("value0")) != std::string::npos) {
+      if (ref_attr_name.find("value0") != std::string::npos) {
         ValuePtr res = ObtainCNodeAttrInSingleScalarForm(attr_proto);
         const std::string &op_type = prim->name();
         if (!IsLite()) {
@@ -855,7 +851,7 @@ bool MSANFModelParser::GetAttrValueForValueNode(const std::string &value_node_na
   }
   const std::string &ref_attr_name = attr_proto.ref_attr_name();
   string type = "";
-  std::size_t pos(0);
+  std::size_t pos;
   if ((pos = ref_attr_name.find("scalar:")) != std::string::npos) {
     type = ref_attr_name.substr(pos, string("scalar:").length() - 1);
   } else if ((pos = ref_attr_name.find("type:")) != std::string::npos) {
@@ -876,15 +872,14 @@ bool MSANFModelParser::GetAttrValueForValueNode(const std::string &value_node_na
       break;
     }
     case FORM_PARSE_SCALAR: {
-      std::size_t value_pos(0);
-      if ((value_pos = ref_attr_name.find("value0")) != std::string::npos) {
+      if (ref_attr_name.find("value0") != std::string::npos) {
         auto res = ObtainCNodeAttrInSingleScalarForm(attr_proto);
         new_value_node = NewValueNode(res);
         new_value_node->set_abstract(res->ToAbstract());
         anfnode_build_map_[value_node_name] = new_value_node;
         break;
       }
-      if ((value_pos = ref_attr_name.find("Tuple[]")) != std::string::npos) {
+      if (ref_attr_name.find("Tuple[]") != std::string::npos) {
         MS_LOG(INFO) << "Build Tuple() ValueNode for primitive.";
         ValuePtr res = MakeValue(std::vector<ValuePtr>{});
         new_value_node = NewValueNode(res);
@@ -892,7 +887,7 @@ bool MSANFModelParser::GetAttrValueForValueNode(const std::string &value_node_na
         anfnode_build_map_[value_node_name] = new_value_node;
         break;
       }
-      if ((value_pos = ref_attr_name.find("Tuple[value")) != std::string::npos && attr_proto.tensors_size() > 1) {
+      if (ref_attr_name.find("Tuple[value") != std::string::npos && attr_proto.tensors_size() > 1) {
         MS_LOG(INFO) << "Build TupleTensor ValueNode for primitive.";
         ObtainValueNodeInTupleTensorForm(value_node_name, attr_proto);
         break;
@@ -962,7 +957,7 @@ static std::string GetDoSignaturePrimitiveName(const std::string &node_type) {
   }
   // hyper_map[xxx] -> xxx
   constexpr auto offset = 2;
-  auto op_name = prim_name.substr(strlen(kHyperMapPrefix) + 1, prim_name.length() - strlen(kHyperMapPrefix) - offset);
+  auto op_name = prim_name.substr(strlen(kHyperMapPrefix) + 1, (prim_name.length() - strlen(kHyperMapPrefix)) - offset);
   return op_name;
 }
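The added parentheses only group the existing arithmetic for readability; the values are unchanged. For a name of the form hyper_map[xxx] (per the comment in the code), the start index skips the prefix plus '[', and the length drops both brackets, hence offset = 2. A worked check with a hypothetical operator name:

    #include <cstdio>
    #include <cstring>
    #include <string>

    int main() {
      const char kHyperMapPrefix[] = "hyper_map";
      std::string prim_name = "hyper_map[add]";  // hypothetical example
      constexpr auto offset = 2;                 // the '[' and ']' around the name
      auto op_name = prim_name.substr(
          std::strlen(kHyperMapPrefix) + 1,
          (prim_name.length() - std::strlen(kHyperMapPrefix)) - offset);
      std::printf("%s\n", op_name.c_str());  // prints "add"
      return 0;
    }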
@@ -1004,7 +999,7 @@ AnfNodePtr MSANFModelParser::BuildOperatorNode(const mind_ir::NodeProto &node_pr
   return std::make_shared<ValueNode>(prim);
 }

-bool MSANFModelParser::CheckCNodePrim(CNodePtr cnode_ptr) {
+bool MSANFModelParser::CheckCNodePrim(const CNodePtr &cnode_ptr) {
   // Handle control flow operator.
   auto operatorPtr = cnode_ptr->input(0);
   // Set abstract of switch(c,f,t),switchLayer(c,tup) and
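CheckCNodePrim now takes its shared_ptr by const reference, so calls no longer copy the smart pointer and pay an atomic reference-count increment and decrement; that is appropriate because the function only inspects the node and keeps no ownership. A sketch of the difference with hypothetical types:

    #include <memory>

    struct CNode { int op_count = 0; };
    using CNodePtr = std::shared_ptr<CNode>;

    // By value: copies the shared_ptr, bumping the atomic refcount on entry/exit.
    bool CheckByValue(CNodePtr cnode) { return cnode != nullptr; }

    // By const reference: no refcount traffic; fine when no ownership is taken.
    bool CheckByRef(const CNodePtr &cnode) { return cnode != nullptr; }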


@@ -75,7 +75,7 @@ class MSANFModelParser {
   bool ObtainCNodeAttrInTensorForm(const PrimitivePtr &prim, const mind_ir::AttributeProto &attr_proto);
   bool BuildValueNodeForFuncGraph(const mind_ir::NodeProto &node_proto);
   AnfNodePtr BuildOperatorNode(const mind_ir::NodeProto &node_proto);
-  bool CheckCNodePrim(CNodePtr cnode_ptr);
+  bool CheckCNodePrim(const CNodePtr &cnode_ptr);
   bool SetEmptyTensorProtoCNodeAbstract(const AnfNodePtr &node_ptr);
   bool SetCNodeAbstract(const mind_ir::AttributeProto &attr_proto, const CNodePtr &cnode_ptr);
   bool SetNodeAbstractFromAttrProto(const mind_ir::AttributeProto &attr_proto, const AnfNodePtr &node_ptr);