forked from mindspore-Ecosystem/mindspore

!38673 fix code check
Merge pull request !38673 from lianliguang/master

commit abef6409dc
@@ -142,7 +142,7 @@ class MS_API Status {
  private:
   // api without std::string
   Status(enum StatusCode status_code, const std::vector<char> &status_msg);
-  Status(const enum StatusCode code, int line_of_code, const char *file_name, const std::vector<char> &extra);
+  Status(enum StatusCode code, int line_of_code, const char *file_name, const std::vector<char> &extra);
   std::vector<char> ToCString() const;
   std::vector<char> GetErrDescriptionChar() const;
   std::vector<char> SetErrDescription(const std::vector<char> &err_description);
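A note on the hunk above: top-level const on a by-value parameter is ignored when matching signatures, so the dropped `const` changes nothing for callers; the linter flags it as noise in a declaration. A minimal illustration (hypothetical function, not from the patch):

    // These two declare the same function; the second is a
    // redeclaration, not an overload.
    void Log(enum StatusCode code);
    void Log(const enum StatusCode code);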
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 #include "cxx_api/model/model_impl.h"
+#include <string>
 #include "cxx_api/dlutils.h"
 
 namespace mindspore {
@@ -197,7 +197,7 @@ class TensorStringifier {
   TensorStringifier(const T *data, size_t data_size, size_t ndim) : data_(data), data_size_(data_size), ndim_(ndim) {}
   ~TensorStringifier() = default;
 
-  std::string ToString(TypeId type, const ShapeVector &shape, bool use_comma) const {
+  std::string ToString(TypeId, const ShapeVector &shape, bool use_comma) const {
     constexpr auto valid =
       std::is_same<T, bool>::value || std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value ||
       std::is_same<T, int16_t>::value || std::is_same<T, int32_t>::value || std::is_same<T, int64_t>::value ||
@@ -214,7 +214,8 @@ class TensorStringifier {
 
     std::ostringstream ss;
     if (data_size_ == 1 && ndim_ == 0) {  // Scalar
-      OutputDataString(ss, 0, 0, 1, false, 0);
+      int max = 0;
+      OutputDataString(ss, 0, 0, 1, false, &max);
       return ss.str();
     }
 
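The scalar branch previously passed the literal 0 where `OutputDataString` takes an `int *` out-parameter, i.e. a null pointer; the fix hands it the address of a real `int`. Sketch of the hazard (hypothetical callee):

    // Writes the computed maximum through the pointer.
    void WriteMax(int *max) { *max = 42; }

    int max = 0;
    WriteMax(&max);  // fine: valid address
    // WriteMax(0);  // null pointer; the write would be undefined behavior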
@@ -919,7 +920,7 @@ static std::map<TypeId, std::vector<TensorChunk>> GroupingTensors(const TensorPt
     auto &chunk = chunks.back();
     chunk.size += tensor->DataSize();
     chunk.bytes += tensor_bytes;
-    chunk.tensors.emplace_back(tensor);
+    (void)chunk.tensors.emplace_back(tensor);
   }
   return group_info;
 }
@@ -980,7 +981,7 @@ TensorPtrList Tensor::GetFlattenedTensors(const TensorPtrList &tensors) {
   TensorPtrList result_tensors;
   for (auto &entry : chunk_map) {
     auto &chunk_tensors = entry.second;
-    result_tensors.insert(result_tensors.end(), chunk_tensors.begin(), chunk_tensors.end());
+    (void)result_tensors.insert(result_tensors.end(), chunk_tensors.begin(), chunk_tensors.end());
   }
   return result_tensors;
 }
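The `(void)` casts that recur throughout this commit mark a discarded return value as intentional, which satisfies checkers that warn on ignored results. A minimal sketch:

    #include <vector>

    int main() {
      std::vector<int> v;
      // Since C++17 emplace_back returns a reference; the cast documents
      // that dropping it is deliberate.
      (void)v.emplace_back(1);
      return 0;
    }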
@@ -1052,7 +1053,6 @@ const size_t CSRTensor::GetSizeAt(size_t index) const {
     return sizeof(int64_t);
   }
   MS_LOG(EXCEPTION) << "Invalid index: " << index << " for CSRTensor: " << ToString();
-  return kTypeUnknown;
 }
 
 TensorPtr CSRTensor::GetTensorAt(size_t index) const {
@@ -1069,7 +1069,6 @@ TensorPtr CSRTensor::GetTensorAt(size_t index) const {
     return std::make_shared<tensor::Tensor>(shape_[index - kShapeIdx], TypeIdToType(kNumberTypeInt64));
   }
   MS_LOG(EXCEPTION) << "Invalid index: " << index << " for CSRTensor: " << ToString();
-  return nullptr;
 }
 
 TensorPtr COOTensor::GetTensorAt(size_t index) const {
@@ -174,7 +174,6 @@ class MS_CORE_API Tensor final : public MetaTensor {
   ///
   /// \param[in] tensor [Tensor] The input tensor.
   explicit Tensor(const Tensor &tensor);
-
   /// \brief Create tensor with given data type from another tensor.
   ///
   /// \param[in] tensor [Tensor] The input tensor.
@@ -406,16 +406,16 @@ void MSANFModelParser::SetCNodePrimAttrAndAbstract(const mind_ir::NodeProto &nod
 abstract::AbstractTensorPtr MSANFModelParser::GetAbsTensorFromTensorProto(const mind_ir::TensorProto &tensor_proto) {
   ShapeVector shape;
   for (int i = 0; i < tensor_proto.dims_size(); ++i) {
-    shape.emplace_back(tensor_proto.dims(i));
+    (void)shape.emplace_back(tensor_proto.dims(i));
   }
   ShapeVector min_shape;
   for (int i = 0; i < tensor_proto.min_dims_size(); ++i) {
-    min_shape.emplace_back(tensor_proto.min_dims(i));
+    (void)min_shape.emplace_back(tensor_proto.min_dims(i));
   }
 
   ShapeVector max_shape;
   for (int i = 0; i < tensor_proto.max_dims_size(); ++i) {
-    max_shape.emplace_back(tensor_proto.max_dims(i));
+    (void)max_shape.emplace_back(tensor_proto.max_dims(i));
   }
 
   if (!tensor_proto.has_data_type()) {
@@ -504,36 +504,38 @@ bool MSANFModelParser::GetTensorDataFromExternal(const mind_ir::TensorProto &ten
         return false;
       }
       data = plain_data.get();
-      tenor_data_.emplace(tensor_proto.external_data().location(), std::move(plain_data));
+      (void)tenor_data_.emplace(tensor_proto.external_data().location(), std::move(plain_data));
     } else {
       // Read file
       std::basic_ifstream<char> fid(file, std::ios::in | std::ios::binary);
       if (!fid) {
         MS_LOG(EXCEPTION) << "Open file '" << file << "' failed, please check the correct of the file.";
       }
-      fid.seekg(0, std::ios_base::end);
+      (void)fid.seekg(0, std::ios_base::end);
       size_t file_size = static_cast<size_t>(fid.tellg());
       fid.clear();
-      fid.seekg(0);
+      (void)fid.seekg(0);
       auto plain_data = std::make_unique<char[]>(file_size);
       constexpr Byte is_little_endian = 1;
       constexpr int byte_order_index = 0;
-      fid.read(plain_data.get(), file_size);
+      (void)fid.read(plain_data.get(), SizeToLong(file_size));
       fid.close();
       // if byte order is not same return false
-      if ((plain_data[byte_order_index] == is_little_endian) != little_endian()) {
+      if ((plain_data[byte_order_index] == is_little_endian) ^ little_endian()) {
        MS_LOG(ERROR) << "The byte order of export MindIr device and load MindIr device is not same!";
        return false;
      }
      data = reinterpret_cast<const unsigned char *>(plain_data.get());
-      tenor_data_.emplace(tensor_proto.external_data().location(),
-                          std::unique_ptr<Byte[]>(reinterpret_cast<Byte *>(plain_data.release())));
+      (void)tenor_data_.emplace(tensor_proto.external_data().location(),
+                                std::unique_ptr<Byte[]>(reinterpret_cast<Byte *>(plain_data.release())));
     }
   }
   auto *tensor_data_buf = reinterpret_cast<uint8_t *>(tensor_info->data_c());
   MS_EXCEPTION_IF_NULL(tensor_data_buf);
-  auto ret = common::huge_memcpy(tensor_data_buf, tensor_info->data().nbytes(),
-                                 data + tensor_proto.external_data().offset(), tensor_proto.external_data().length());
+  MS_EXCEPTION_IF_NULL(data);
+  auto ret =
+    common::huge_memcpy(tensor_data_buf, tensor_info->data().nbytes(), data + tensor_proto.external_data().offset(),
+                        LongToSize(tensor_proto.external_data().length()));
   if (ret != 0) {
     MS_LOG(ERROR) << "Build parameter occur memcpy_s error.";
     return false;
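For two `bool` operands, `a != b` and `a ^ b` produce the same result (both promote to `int`), so swapping `!=` for `^` in the byte-order test is purely a style-check fix. The host byte order itself can be probed as below (a sketch, not the MindSpore `little_endian()` helper):

    #include <cstdint>
    #include <cstring>

    // True on a little-endian host: the low-order byte is stored first.
    bool IsLittleEndian() {
      const uint16_t probe = 1;
      uint8_t first_byte = 0;
      std::memcpy(&first_byte, &probe, 1);
      return first_byte == 1;
    }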
@@ -1065,15 +1067,15 @@ bool MSANFModelParser::GetAttrValueForValueNodeWithType(const std::string &value
       mind_ir::TensorProto tensor_proto = attr_proto.tensors(0);
       if (tensor_proto.has_raw_data()) {
         // For real tensor.
-        ObtainValueNodeInTensorForm(value_node_name, tensor_proto);
+        (void)ObtainValueNodeInTensorForm(value_node_name, tensor_proto);
       } else {
         // For data type.
-        ObtainValueNodeInTypeForm(value_node_name, tensor_proto);
+        (void)ObtainValueNodeInTypeForm(value_node_name, tensor_proto);
       }
       break;
     }
     case mind_ir::AttributeProto_AttributeType_NONE: {
-      ObtainValueNodeInNoneForm(value_node_name);
+      (void)ObtainValueNodeInNoneForm(value_node_name);
       break;
     }
     case mind_ir::AttributeProto_AttributeType_UMONAD: {
@@ -1158,7 +1160,7 @@ bool MSANFModelParser::GetAttrValueForValueNode(const std::string &value_node_na
       break;
     }
     case FORM_PARSE_NONE: {
-      ObtainValueNodeInNoneForm(value_node_name);
+      (void)ObtainValueNodeInNoneForm(value_node_name);
       break;
     }
     case FORM_PARSE_MONAD: {
@@ -1350,10 +1352,6 @@ CNodePtr MSANFModelParser::BuildCNodeForFuncGraph(const FuncGraphPtr &outputFunc
 bool MSANFModelParser::BuildReturnForFuncGraph(const FuncGraphPtr &outputFuncGraph,
                                                const mind_ir::GraphProto &importProto) {
   MS_EXCEPTION_IF_NULL(outputFuncGraph);
-  if (importProto.output_size() <= 0 || importProto.output_size() > INT_MAX) {
-    MS_LOG(ERROR) << "importProto.output_size is: " << importProto.output_size();
-    return false;
-  }
   std::vector<AnfNodePtr> inputs;
   if (importProto.output_size() > 1) {
     inputs.push_back(NewValueNode(prim::kPrimMakeTuple));
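The deleted guard here (and in the next hunk) was unreachable on its upper edge: protobuf's repeated-field `*_size()` accessors return `int`, and no `int` can exceed INT_MAX, so that comparison is tautologically false. Illustration:

    #include <climits>

    bool UpperGuard(int n) {
      return n > INT_MAX;  // always false for an int: dead code
    }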
@@ -1402,10 +1400,6 @@ bool MSANFModelParser::BuildReturnForFuncGraph(const FuncGraphPtr &outputFuncGra
 bool MSANFModelParser::ImportNodesForGraph(const FuncGraphPtr &outputFuncGraph,
                                            const mind_ir::GraphProto &importProto) {
   MS_EXCEPTION_IF_NULL(outputFuncGraph);
-  if (importProto.node_size() < 0 || importProto.node_size() > INT_MAX) {
-    MS_LOG(ERROR) << "importProto.node_size is: " << importProto.node_size();
-    return false;
-  }
   MS_LOG(DEBUG) << "The node size: " << importProto.node_size();
   CNodePtr cnode_ptr = nullptr;
   for (int i = 0; i < importProto.node_size(); ++i) {
@@ -1514,11 +1508,10 @@ bool MSANFModelParser::SetValueForTopGraphParameter(const FuncGraphPtr &topGraph
                                                     const std::map<std::string, ValuePtr> &weights) {
   size_t fv_param_count = 0;
   auto parameters = topGraph->parameters();
-  for (int i = parameters.size() - 1; i >= 0; --i) {
-    size_t index = IntToSize(i);
-    auto parameter = parameters[index]->cast<ParameterPtr>();
+  for (int64_t i = SizeToLong(parameters.size()) - 1; i >= 0; --i) {
+    auto parameter = parameters[i]->cast<ParameterPtr>();
     if (parameter == nullptr) {
-      MS_LOG(ERROR) << "AnfNode " << parameters[index]->DebugString() << " should be Parameter.";
+      MS_LOG(ERROR) << "AnfNode " << parameters[i]->DebugString() << " should be Parameter.";
       return false;
     }
     auto type = parameter->Type();
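The loop rewrite replaces an `int` index (initialized from a `size_t` expression) with an explicit signed 64-bit index. Counting down over an unsigned size is the classic wrap-around trap, since an unsigned `i >= 0` is always true; converting the size to a signed count first keeps the termination test meaningful. Sketch:

    #include <cstdint>
    #include <vector>

    void WalkBackwards(const std::vector<int> &v) {
      // Wrong: if i were size_t, "i >= 0" never fails and i wraps past zero.
      // Right: derive a signed index from the unsigned size.
      for (int64_t i = static_cast<int64_t>(v.size()) - 1; i >= 0; --i) {
        (void)v[static_cast<size_t>(i)];
      }
    }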
@@ -1633,7 +1626,7 @@ const LayoutMap MSANFModelParser::ParseLayout(const mind_ir::ModelProto &model_p
     }
     std::vector<int64_t> tensor_map;
     for (int num = 0; num < layout_proto.tensor_map_int_size(); ++num) {
-      tensor_map.emplace_back(layout_proto.tensor_map_int(num));
+      (void)tensor_map.emplace_back(layout_proto.tensor_map_int(num));
     }
     std::vector<int64_t> slice_shape;
     for (int num = 0; num < layout_proto.slice_shape_int_size(); ++num) {
@@ -1759,7 +1752,7 @@ abstract::AbstractBasePtr MSANFModelParser::BuildAbstractFunction(const mind_ir:
         MS_LOG(WARNING) << "Can't get the abstract of function union closure: " << item_proto.DebugString();
         return nullptr;
       }
-      func_list.emplace_back(item_abstract->cast<abstract::AbstractFuncAtomPtr>());
+      (void)func_list.emplace_back(item_abstract->cast<abstract::AbstractFuncAtomPtr>());
     }
     return std::make_shared<abstract::AbstractFuncUnion>(func_list);
   }
@@ -1777,7 +1770,7 @@ void MSANFModelParser::CorrectFuncGraph(const FuncGraphPtr &root) {
   auto valid =
     std::all_of(inputs.begin(), inputs.end(), [](const AnfNodePtr &arg) -> bool { return arg->abstract() != nullptr; });
   if (valid) {
-    ValidMindir(root);
+    (void)ValidMindir(root);
   } else {
     MS_LOG(INFO) << "There are some nullptr of abstract in the top function graph parameters." << root->DumpText();
   }
@@ -64,7 +64,7 @@ using LayoutMap = std::map<string, LayoutPtr>;
 
 class MSANFModelParser {
  public:
-  MSANFModelParser() : producer_name_(""), model_version_(""), ir_version_("") {}
+  MSANFModelParser() = default;
   ~MSANFModelParser() = default;
 
   static void LoadTensorMapClear() { load_tensor_map_.clear(); }
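`= default` replaces a constructor that only assigned empty strings, which the defaulted constructor already does: `std::string` members default-construct to empty. Equivalent pattern:

    #include <string>

    class Parser {
     public:
      Parser() = default;  // the members below default-construct to ""

     private:
      std::string producer_name_;
      std::string model_version_;
      std::string ir_version_;
    };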
@@ -92,7 +92,7 @@ class MSANFModelParser {
   bool BuildAttrForFuncGraph(const FuncGraphPtr &outputFuncGraph, const mind_ir::GraphProto &importProto);
   bool ImportParametersForGraph(const FuncGraphPtr &outputFuncGraph, const mind_ir::GraphProto &importProto);
   bool ImportNodesForGraph(const FuncGraphPtr &outputFuncGraph, const mind_ir::GraphProto &importProto);
-  bool BuildParameterForFuncGraph(const ParameterPtr &node, const mind_ir::TensorProto &tensor_proto);
+  bool BuildParameterForFuncGraph(const ParameterPtr &node, const mind_ir::TensorProto &parameter_proto);
   bool SetValueForTopGraphParameter(const FuncGraphPtr &topGraph, const std::map<std::string, ValuePtr> &weights);
   bool GetTensorDataFromExternal(const mind_ir::TensorProto &tensor_proto, const tensor::TensorPtr &tensor_info);
   bool BuildInputForFuncGraph(const ParameterPtr &node, const mind_ir::ValueInfoProto &value_proto);
@@ -116,13 +116,13 @@ class MSANFModelParser {
   void SetCNodePrimAttrAndAbstract(const mind_ir::NodeProto &node_proto, const CNodePtr &cnode_ptr);
   bool ObtainValueNodeInTensorForm(const string &value_node_name, const mind_ir::TensorProto &attr_tensor);
   bool ObtainValueNodeInTupleTensorForm(const string &value_node_name, const mind_ir::AttributeProto &attr_proto);
-  bool GetAttrValueForValueNode(const std::string &value_node_name, const mind_ir::AttributeProto &attr_tensor);
+  bool GetAttrValueForValueNode(const std::string &value_node_name, const mind_ir::AttributeProto &attr_proto);
   bool GetAttrValueForValueNodeWithType(const std::string &value_node_name, const mind_ir::AttributeProto &attr_proto);
   bool ObtainValueNodeInTypeForm(const string &value_node_name, const mind_ir::TensorProto &attr_tensor);
   bool ObtainValueNodeInNoneForm(const std::string &value_node_name);
   bool ObtainValueNodeInMonadForm(const std::string &value_node_name, const mind_ir::AttributeProto &attr_proto);
   ValuePtr ObtainValueInSequenceForm(const mind_ir::AttributeProto &attr_proto);
-  bool little_endian() { return little_endian_; }
+  bool little_endian() const { return little_endian_; }
   mindspore::HashMap<std::string, abstract::AbstractBasePtr> GetAbstractForNode(
     const mind_ir::AttributeProto &attr_proto);
   AnfNodePtr GetAnfNode(const std::string &node_name);
@@ -139,7 +139,7 @@ class MSANFModelParser {
   mindspore::HashMap<std::string, AnfNodePtr> anfnode_build_map_;
   std::string mindir_path_;
   const unsigned char *mindir_dec_key_{nullptr};
-  size_t mindir_key_size_;
+  size_t mindir_key_size_{0};
   std::string mindir_dec_mode_;
   bool little_endian_ = common::IsLittleByteOrder();
   std::map<std::string, std::unique_ptr<Byte[]>> tenor_data_;
@@ -50,7 +50,7 @@ class MindIREngine {
   void InferParameter(const AnfNodePtr &node);
   void InferValueNode(const AnfNodePtr &node);
   void InferCNode(const AnfNodePtr &node);
-  void EvalAbstractFunction(const abstract::AbstractFuncAtomPtr &abstractFunc, const CNodePtr &node,
+  void EvalAbstractFunction(const abstract::AbstractFuncAtomPtr &func, const CNodePtr &node,
                             const AbstractBasePtrListPtr &args);
   void EvalPrimitiveAbastract(const abstract::PrimitiveAbstractClosurePtr &func, const CNodePtr &node,
                               const AbstractBasePtrListPtr &args);
@@ -61,8 +61,8 @@ bool get_all_files(const std::string &dir_in, std::vector<std::string> *files) {
         return false;
       }
       if (S_ISDIR(st.st_mode)) {
-        ret = static_cast<int>(get_all_files(name, files));
-        if (!ret) {
+        ret = get_all_files(name, files);
+        if (ret) {
           MS_LOG(ERROR) << "Get files failed, ret is : " << ret;
           closedir(open_dir);
           return false;
@@ -182,7 +182,7 @@ std::vector<std::string> MindIRLoader::LoadPreprocess(const std::string &file_na
   return map_jsons;
 }
 
-std::vector<FuncGraphPtr> MindIRLoader::LoadMindIRs(std::vector<std::string> file_names) {
+std::vector<FuncGraphPtr> MindIRLoader::LoadMindIRs(const std::vector<std::string> &file_names) {
   std::vector<FuncGraphPtr> funcgraph_vec;
   MS_LOG(DEBUG) << "Load multiple MindIR files.";
   for (const auto &file_name : file_names) {
@@ -245,12 +245,12 @@ FuncGraphPtr MindIRLoader::LoadMindIR(const std::string &file_name) {
     return nullptr;
   }
   // Load parameter into graph
-  if (endsWith(std::string(abs_path_buff), "_graph.mindir") && origin_model.graph().parameter_size() == 0) {
+  if (endsWith(std::string(abs_path_buff), "_graph.mindir") && (origin_model.graph().parameter_size() == 0)) {
     if (strlen(abs_path_buff) < strlen("graph.mindir")) {
       MS_LOG(ERROR) << "The abs_path_buff length is less than 'graph.mindir'.";
       return nullptr;
     }
-    int path_len = SizeToInt(strlen(abs_path_buff) - strlen("graph.mindir"));
+    size_t path_len = strlen(abs_path_buff) - strlen("graph.mindir");
     string var_path = std::string(abs_path_buff).substr(0, path_len);
     var_path += "variables";
     std::ifstream ifs(var_path);
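`strlen` returns `size_t`, so keeping `path_len` unsigned avoids the `SizeToInt` narrowing round-trip. The subtraction is only safe because the guard above already rejected the short-string case; unsigned subtraction would otherwise wrap. Sketch:

    #include <cstring>

    // Caller must first check strlen(path) >= strlen(suffix):
    // size_t subtraction wraps around on underflow.
    size_t PrefixLen(const char *path, const char *suffix) {
      return std::strlen(path) - std::strlen(suffix);
    }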
@@ -270,10 +270,6 @@ FuncGraphPtr MindIRLoader::LoadMindIR(const std::string &file_name) {
       return nullptr;
     }
 
-    if (param_graph.parameter_size() < 0 || param_graph.parameter_size() > INT_MAX) {
-      MS_LOG(ERROR) << "param_graph.parameter_size() is : " << param_graph.parameter_size();
-      return nullptr;
-    }
     for (int param_index = 0; param_index < param_graph.parameter_size(); param_index++) {
       mind_ir::TensorProto *param_proto = mod_graph->add_parameter();
       param_proto->set_name(param_graph.parameter(param_index).name());
@@ -42,7 +42,7 @@ class MS_CORE_API MindIRLoader {
   void InitModelParser(MSANFModelParser *model_parser);
   FuncGraphPtr LoadMindIR(const void *buffer, const size_t &size);
   FuncGraphPtr LoadMindIR(const std::string &file_name);
-  std::vector<FuncGraphPtr> LoadMindIRs(const std::vector<std::string> file_names);
+  std::vector<FuncGraphPtr> LoadMindIRs(const std::vector<std::string> &file_names);
   std::vector<std::string> LoadPreprocess(const std::string &file_name);
 
  private:
@@ -26,8 +26,6 @@
 
 namespace mindspore {
 namespace {
-const PrimitiveSet follow_first_input_prims = {prim::kPrimDepend, prim::kPrimLoad};
-
 class AbstractMutexManager {
  public:
   static AbstractMutexManager &GetInstance() {
@@ -98,8 +96,7 @@ AnfNodePtr NewCustomActorNode(const CustomActorInfoPtr &actor_info, const FuncGr
 }
 }  // namespace
 
-AbstractScope::AbstractScope(std::recursive_mutex *mu) {
-  mu_ = mu;
+AbstractScope::AbstractScope(std::recursive_mutex *mu) : mu_(mu) {
   if (mu_ != nullptr) {
     mu_->lock();
   }
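Moving `mu_` into the constructor's initializer list means the member is initialized once rather than default-initialized and then assigned, which is what the init-list checker asks for. The full RAII shape of such a guard (a sketch, not the MindSpore class):

    #include <mutex>

    class LockGuard {
     public:
      explicit LockGuard(std::recursive_mutex *mu) : mu_(mu) {  // initialized, not assigned
        if (mu_ != nullptr) {
          mu_->lock();
        }
      }
      ~LockGuard() {
        if (mu_ != nullptr) {
          mu_->unlock();
        }
      }
      LockGuard(const LockGuard &) = delete;
      LockGuard &operator=(const LockGuard &) = delete;

     private:
      std::recursive_mutex *mu_;
    };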
@@ -397,6 +394,7 @@ int64_t AnfUtils::GetIntValue(const AnfNodePtr &anf_node) {
 
 std::pair<AnfNodePtr, size_t> AnfUtils::VisitKernel(const AnfNodePtr &anf_node, size_t index) {
   MS_EXCEPTION_IF_NULL(anf_node);
+  const PrimitiveSet follow_first_input_prims = {prim::kPrimDepend, prim::kPrimLoad};
   if (anf_node->isa<ValueNode>()) {
     return std::make_pair(anf_node, 0);
   } else if (anf_node->isa<Parameter>()) {
@@ -535,10 +535,10 @@ TypePtr CheckAndConvertUtils::CheckTensorTypeSame(const std::map<std::string, Ty
     std::set<string> order_set;
     for (auto const &valid_type : check_list) {
       if (valid_type->isa<TensorType>()) {
-        order_set.emplace(valid_type->ToString());
+        (void)order_set.emplace(valid_type->ToString());
         break;
       } else {
-        order_set.emplace("Tensor[" + valid_type->ToString() + "]");
+        (void)order_set.emplace("Tensor[" + valid_type->ToString() + "]");
       }
     }
     for (auto const &error_item : order_set) {
@@ -592,7 +592,6 @@ TypePtr CheckAndConvertUtils::CheckSparseTensorTypeValid(const std::string &type
     MS_EXCEPTION(TypeError) << "For Primitive[" << prim_name << "], the input argument[" << type_name
                             << "] cast to SparseTensorTypePtr failed! Get type : " << type->ToString() << ".";
   }
-  return nullptr;
 }
 
 ShapeVector CheckAndConvertUtils::CheckTensorIntValue(const std::string &type_name, const ValuePtr &value,
@@ -645,16 +644,16 @@ TypePtr CheckAndConvertUtils::CheckTensorSubClass(const string &type_name, const
 
   if (is_mix) {
     for (const auto &item : template_types) {
-      order_set.emplace(item->ToString());
+      (void)order_set.emplace(item->ToString());
     }
   }
 
   for (const auto &item : template_types) {
     if (item->isa<TensorType>()) {
-      order_set.emplace(item->ToString());
+      (void)order_set.emplace(item->ToString());
       continue;
     }
-    order_set.emplace("Tensor[" + item->ToString() + "]");
+    (void)order_set.emplace("Tensor[" + item->ToString() + "]");
   }
 
   for (const auto &item : order_set) {
@@ -692,7 +691,7 @@ TypePtr CheckAndConvertUtils::CheckSubClass(const std::string &type_name, const
 
 TypePtr CheckAndConvertUtils::CheckScalarOrTensorTypesSame(const std::map<std::string, TypePtr> &args,
                                                            const std::set<TypePtr> &valid_values,
-                                                           const std::string &prim_name, const bool allow_mix) {
+                                                           const std::string &prim_name, bool allow_mix) {
   (void)_CheckTypeSame(args, prim_name, allow_mix);
   return CheckTensorSubClass(args.begin()->first, args.begin()->second, valid_values, prim_name, true);
 }
@@ -26,7 +26,6 @@
 #include "abstract/param_validator.h"
 #include "base/base.h"
 #include "ir/anf.h"
-#include "ir/dtype/type_id.h"
 #include "include/api/format.h"
 #include "utils/log_adapter.h"
 #if __has_include("include/mindapi/base/types.h")
@@ -301,12 +300,12 @@ class MS_CORE_API CheckAndConvertUtils {
   static void CheckSummaryParam(const AbstractBasePtr &name, const AbstractBasePtr &value,
                                 const std::string &class_name);
   static void CheckMode(const std::string &class_name);
-  static std::vector<int64_t> CheckIntOrTupleInt(const std::string &prim_name, const ValuePtr &attr,
-                                                 const std::string &arg_name);
-  static std::vector<int64_t> CheckTupleInt(const std::string &prim_name, const ValuePtr &attr,
-                                            const std::string &arg_name);
-  static std::vector<int64_t> CheckListInt(const std::string &prim_name, const ValuePtr &attr,
-                                           const std::string &arg_name);
+  static std::vector<int64_t> CheckIntOrTupleInt(const std::string &arg_name, const ValuePtr &attr,
+                                                 const std::string &prim_name);
+  static std::vector<int64_t> CheckTupleInt(const std::string &arg_name, const ValuePtr &attr,
+                                            const std::string &prim_name);
+  static std::vector<int64_t> CheckListInt(const std::string &arg_name, const ValuePtr &attr,
+                                           const std::string &prim_name);
   static void CheckMinMaxShape(const ShapeVector &shape, ShapeVector *min_shape, ShapeVector *max_shape);
   static int64_t GetAndCheckFormat(const ValuePtr &value);
   static size_t GetRemoveMonadAbsNum(const AbstractBasePtrList &abs_list);
@@ -77,7 +77,7 @@ class CompactSet {
 
   T pop() {
     T e = std::move(data_.front());
-    data_.erase(data_.begin());
+    (void)data_.erase(data_.begin());
     return e;
   }
 
@@ -15,10 +15,8 @@
  */
 
 #include "utils/info.h"
-#include <utility>
 #include <fstream>
 #include <sstream>
-#include <climits>
 #include "ir/anf.h"
 #include "ir/graph_utils.h"
 #include "ir/func_graph.h"
@@ -19,7 +19,6 @@
 #include <iostream>
 #include <memory>
 #include <string>
-#include <vector>
 #include "utils/hash_map.h"
 #include "ir/anf.h"
 
@@ -27,7 +26,8 @@ namespace mindspore {
 namespace label_manage {
 enum class TraceLabelType { kShortSymbol, kFullName, kWithUniqueId };
 MS_CORE_API TraceLabelType GetGlobalTraceLabelType();
-MS_CORE_API std::string Label(const DebugInfoPtr &debug_info, TraceLabelType trace_type = TraceLabelType::kShortSymbol);
+MS_CORE_API std::string Label(const DebugInfoPtr &debug_info,
+                              TraceLabelType trace_label = TraceLabelType::kShortSymbol);
 }  // namespace label_manage
 }  // namespace mindspore
 
@@ -17,8 +17,8 @@
 #ifndef MINDSPORE_CORE_UTILS_LOG_ADAPTER_H_
 #define MINDSPORE_CORE_UTILS_LOG_ADAPTER_H_
 
-#include <stdarg.h>
-#include <stdint.h>
+#include <cstdarg>
+#include <cstdint>
 #include <string>
 #include <sstream>
 #include <memory>
@@ -45,7 +45,6 @@
 static constexpr size_t GetRelPathPos() noexcept {
   return sizeof(__FILE__) > sizeof(LOG_HDR_FILE_REL_PATH) ? sizeof(__FILE__) - sizeof(LOG_HDR_FILE_REL_PATH) : 0;
 }
-
 namespace mindspore {
 /// \brief The handler map for ACL.
 MS_CORE_API extern std::map<void **, std::thread *> acl_handle_map;
@@ -231,11 +230,7 @@
-MS_EXPORT extern enum MsLogLevel this_thread_max_log_level;
+MS_EXPORT extern thread_local enum MsLogLevel this_thread_max_log_level;
 class TryCatchGuard {
  public:
-  TryCatchGuard() {
-    origin_log_level_ = this_thread_max_log_level;
-    this_thread_max_log_level = MsLogLevel::WARNING;
-  }
-
+  TryCatchGuard() : origin_log_level_(this_thread_max_log_level) { this_thread_max_log_level = MsLogLevel::WARNING; }
   ~TryCatchGuard() { this_thread_max_log_level = origin_log_level_; }
 
  private:
@@ -330,11 +325,11 @@ inline bool IS_OUTPUT_ON(enum MsLogLevel level) noexcept(true) {
   }                                                       \
   } while (0)
 
-#define MS_EXCEPTION_IF_ZERO(name, value)                   \
-  do {                                                      \
-    if (value == 0) {                                       \
-      MS_LOG(EXCEPTION) << ": The " << name << " is zero."; \
-    }                                                       \
+#define MS_EXCEPTION_IF_ZERO(name, value)                     \
+  do {                                                        \
+    if ((value) == 0) {                                       \
+      MS_LOG(EXCEPTION) << ": The " << (name) << " is zero."; \
+    }                                                         \
   } while (0)
 
 #define MS_ERROR_IF_NULL(ptr) \
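The extra parentheses around `value` and `name` are macro hygiene: without them, an argument that is itself an expression re-associates with the macro body. Worked example (hypothetical use):

    #define BAD_IS_ZERO(value) (value == 0)
    #define GOOD_IS_ZERO(value) ((value) == 0)

    // BAD_IS_ZERO(a & mask)  expands to (a & mask == 0), which groups as
    // (a & (mask == 0)) because == binds tighter than &.
    // GOOD_IS_ZERO(a & mask) expands to ((a & mask) == 0), as intended.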
@@ -17,7 +17,6 @@
 #ifndef MINDSPORE_CORE_UTILS_MISC_H_
 #define MINDSPORE_CORE_UTILS_MISC_H_
 
-#include <list>
 #include <memory>
 #include <string>
 #include <sstream>
@@ -15,11 +15,11 @@
  */
 
 #include "utils/ms_context.h"
-#include <cstdlib>
 #include <thread>
 #include <atomic>
 #include <fstream>
 #include <algorithm>
+#include <utility>
 #include "ir/tensor.h"
 #include "utils/ms_utils.h"
 
@@ -20,14 +20,11 @@
 #include <memory>
 #include <map>
 #include <set>
-#include <vector>
 #include <string>
-#include <utility>
 #include <functional>
 #include "utils/log_adapter.h"
 #include "utils/ms_utils.h"
 #ifdef ENABLE_TDTQUE
-#include "pybind11/pybind11.h"
 #include "minddata/dataset/engine/device_queue_impl/tdt/tdt_handle.h"
 using mindspore::dataset::TdtHandle;
 #endif
@@ -146,7 +143,7 @@ enum MsCtxParam : unsigned {
 
 class MS_CORE_API MsContext {
  public:
-  MsContext(const std::string &backend_policy, const std::string &target);
+  MsContext(const std::string &policy, const std::string &target);
   ~MsContext() = default;
   MsContext(const MsContext &) = delete;
   MsContext &operator=(const MsContext &) = delete;
@@ -98,9 +98,9 @@ class OrderedSet {
   // insert an element to the end of OrderedSet.
   std::pair<iterator, bool> insert(const element_type &e) { return insert(ordered_data_.end(), e); }
 
-  void push_back(const element_type &e) { (void)insert(ordered_data_.end(), e); }
+  void push_back(const element_type &e) const { (void)insert(ordered_data_.end(), e); }
 
-  void push_front(const element_type &e) { (void)insert(ordered_data_.begin(), e); }
+  void push_front(const element_type &e) const { (void)insert(ordered_data_.begin(), e); }
 
   // Remove an element, if removed return true, otherwise return false
   bool erase(const element_type &e) {
@@ -428,7 +428,7 @@ class OrderedSet<std::shared_ptr<T>> {
   const element_type &front() const { return ordered_data_.front(); }
 
   // Return true if there are no common elements.
-  bool is_disjoint(const OrderedSet &other) {
+  bool is_disjoint(const OrderedSet &other) const {
     return std::all_of(begin(), end(), [&other](const auto &e) { return !other.contains(e); });
   }
 
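`is_disjoint` only reads the set, so qualifying it `const` lets it be called through const references and pointers; the same reasoning is behind the other `const` qualifications added in this commit. Sketch:

    #include <set>

    class Wrapper {
     public:
      bool contains(int v) const { return data_.count(v) > 0; }  // read-only

     private:
      std::set<int> data_;
    };

    void Use(const Wrapper &w) {
      (void)w.contains(3);  // compiles only because contains() is const
    }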
@@ -34,7 +34,9 @@ std::ostream &operator<<(std::ostream &out, const std::vector<T> &v) {
   size_t last = v.size() - 1;
   for (size_t i = 0; i < v.size(); ++i) {
     out << v[i];
-    if (i != last) out << ", ";
+    if (i != last) {
+      out << ", ";
+    }
   }
   out << "]";
   return out;
@@ -192,9 +192,9 @@ void Profile::Pop(void) noexcept {
   ctx_ptr_ = ctx_ptr_->parent_;
 }
 
-ProfContext::ProfContext(const std::string &name, ProfileBase *const prof) : name_(name), prof_(prof) {
+ProfContext::ProfContext(const std::string &name, ProfileBase *const prof)
+    : name_(name), prof_(prof), time_info_(nullptr) {
   // Initialize a subcontext.
-  time_info_ = nullptr;
   if (prof == nullptr || IsTopContext()) {
     parent_ = nullptr;
   } else {
@@ -35,6 +35,7 @@ class ProfileBase;
 struct TimeInfo {
   explicit TimeInfo(double time = -1.0) : time_(time), dict_(nullptr), actionNum_(0) {}
   TimeInfo(const TimeInfo &) = delete;
+  TimeInfo &operator=(const TimeInfo &) = delete;
   ~TimeInfo();
 
   double time_;
@@ -49,7 +50,7 @@ class MS_CORE_API ProfContext {
   friend class ProfTransaction;
 
  public:
-  ProfContext(const std::string &name, ProfileBase *prof);
+  ProfContext(const std::string &name, ProfileBase *const prof);
   ~ProfContext();
 
   ProfContext(const ProfContext &) = delete;
@@ -103,6 +104,7 @@ class MS_CORE_API ProfTransaction {
   explicit ProfTransaction(const ProfileBase *prof);
   explicit ProfTransaction(ProfContext *const ctx) : ctx_(ctx) {}
   ProfTransaction(const ProfTransaction &) = delete;
+  ProfTransaction &operator=(const ProfTransaction &) = delete;
   ~ProfTransaction();
 
   template <class Function>
@@ -146,7 +148,7 @@ class MS_CORE_API DumpTime {
   DumpTime &operator=(const DumpTime &) = delete;
   static DumpTime &GetInstance();
   void set_file_path(const std::string &save_path) { file_path_ = save_path; }
-  void Record(const std::string &name, const double time, const bool is_start);
+  void Record(const std::string &step_name, const double time, const bool is_start);
   void Save();
 
  private:
@@ -157,10 +159,7 @@ class MS_CORE_API DumpTime {
 };
 
 struct TimeStat {
-  TimeStat() {
-    time_ = 0.0;
-    count_ = 0;
-  }
+  TimeStat() : time_(0.0), count_(0) {}
   ~TimeStat() = default;
 
   void operator+=(double t) {
@@ -42,7 +42,7 @@ template <class FuncType>
 class Signal {
  public:
   template <class... Args>
-  void operator()(Args &&... args) {
+  void operator()(Args &&... args) const {
     for (auto &slot : slots_) {
       if (slot->callback != nullptr) {
         slot->callback(std::forward<Args>(args)...);
@@ -66,4 +66,4 @@ class Signal {
 };
 }  // namespace mindspore
 
-#endif  // MINDSPORE_CORE_UTILS_EVENT_H_
+#endif  // MINDSPORE_CORE_UTILS_SIGNAL_H_
@@ -97,7 +97,7 @@ class Platform {
 // check the null point
 #define MS_EXCEPT_CHECK_NULL(value)              \
   do {                                           \
-    if (value == nullptr) {                      \
+    if ((value) == nullptr) {                    \
       MS_LOG(EXCEPTION) << "The value is null."; \
     }                                            \
   } while (0)
@@ -17,7 +17,7 @@
 #ifndef MINDSPORE_CORE_UTILS_SYSTEM_CRC32C_H_
 #define MINDSPORE_CORE_UTILS_SYSTEM_CRC32C_H_
 
-#include <stddef.h>
+#include <cstddef>
 #include <cstdint>
 #include "utils/system/base.h"
 #include "utils/system/env.h"
@@ -26,7 +26,7 @@
 namespace mindspore {
 namespace system {
 // Align n to (1 << m) byte boundary
-#define MEM_ALIGN(n, m) ((n + ((1 << (m)) - 1)) & ~((1 << (m)) - 1))
+#define MEM_ALIGN(n, m) (((n) + ((1 << (m)) - 1)) & (~((1 << (m)) - 1)))
 
 // Masked for crc.
 static constexpr uint32 kMaskDelta = 0xa282ead8ul;
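Worked through, the macro rounds n up to a multiple of 1 << m: for n = 13, m = 2, it computes (13 + 3) & ~3 = 16. The new parentheses around n matter as soon as the argument is a compound expression:

    #define MEM_ALIGN(n, m) (((n) + ((1 << (m)) - 1)) & (~((1 << (m)) - 1)))

    // MEM_ALIGN(13, 2) == 16: rounds 13 up to the next multiple of 4.
    //
    // With the old, unparenthesized n, MEM_ALIGN(a | b, 2) would expand to
    // ((a | b + 3) & ~3), where + binds to b alone: (a | (b + 3)) & ~3
    // instead of the intended ((a | b) + 3) & ~3.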
@@ -17,8 +17,8 @@
 #ifndef MINDSPORE_CORE_UTILS_SYSTEM_FILE_SYSTEM_H_
 #define MINDSPORE_CORE_UTILS_SYSTEM_FILE_SYSTEM_H_
 
-#include <errno.h>
 #include <sys/param.h>
+#include <cerrno>
 #include <cstdint>
 #include <cstdlib>
 #include <cstdio>
@@ -19,9 +19,9 @@
 #include <map>
 #include <functional>
 namespace mindspore {
-tensor::TensorPtr TensorConstructUtils::CreateZerosTensor(const TypePtr &type_ptr, const std::vector<int64_t> &shape) {
-  MS_EXCEPTION_IF_NULL(type_ptr);
-  auto type_id = ExtractTypeId(type_ptr);
+tensor::TensorPtr TensorConstructUtils::CreateZerosTensor(const TypePtr &type, const std::vector<int64_t> &shape) {
+  MS_EXCEPTION_IF_NULL(type);
+  auto type_id = ExtractTypeId(type);
   tensor::TensorPtr tensor = std::make_shared<tensor::Tensor>(type_id, shape);
   size_t mem_size = IntToSize(tensor->ElementsNum());
   auto tensor_data = tensor->data_c();
@@ -32,9 +32,9 @@ tensor::TensorPtr TensorConstructUtils::CreateZerosTensor(const TypePtr &type_pt
   return tensor;
 }
 
-tensor::TensorPtr TensorConstructUtils::CreateOnesTensor(const TypePtr &type_ptr, const std::vector<int64_t> &shape) {
-  MS_EXCEPTION_IF_NULL(type_ptr);
-  auto type_id = ExtractTypeId(type_ptr);
+tensor::TensorPtr TensorConstructUtils::CreateOnesTensor(const TypePtr &type, const std::vector<int64_t> &shape) {
+  MS_EXCEPTION_IF_NULL(type);
+  auto type_id = ExtractTypeId(type);
   tensor::TensorPtr tensor = std::make_shared<tensor::Tensor>(type_id, shape);
   const size_t &mem_size = IntToSize(tensor->ElementsNum());
   auto tensor_data = tensor->data_c();
@@ -65,30 +65,31 @@ tensor::TensorPtr TensorConstructUtils::CreateOnesTensor(const TypePtr &type_ptr
   };
 
   const auto &tensor_type = tensor->data_type();
-  if (!type_dict.count(tensor_type)) {
+  auto iter = type_dict.find(tensor_type);
+  if (iter == type_dict.end()) {
     MS_LOG(EXCEPTION) << "unsupported data type: " << tensor_type;
   }
-  type_dict[tensor_type]();
+  iter->second();
   return tensor;
 }
 
-tensor::TensorPtr TensorConstructUtils::CreateTensor(const TypePtr &type_ptr, const std::vector<int64_t> &shape,
+tensor::TensorPtr TensorConstructUtils::CreateTensor(const TypePtr &type, const std::vector<int64_t> &shape,
                                                      void *data) {
-  MS_EXCEPTION_IF_NULL(type_ptr);
-  auto type_id = ExtractTypeId(type_ptr);
+  MS_EXCEPTION_IF_NULL(type);
+  auto type_id = ExtractTypeId(type);
   tensor::TensorPtr tensor = std::make_shared<tensor::Tensor>(type_id, shape, data, type_id);
   return tensor;
 }
 
-TypeId TensorConstructUtils::ExtractTypeId(const TypePtr &type_ptr) {
-  MS_EXCEPTION_IF_NULL(type_ptr);
+TypeId TensorConstructUtils::ExtractTypeId(const TypePtr &type) {
+  MS_EXCEPTION_IF_NULL(type);
   TypeId type_id;
-  if (type_ptr->isa<TensorType>()) {
-    auto tensor_type = type_ptr->cast<TensorTypePtr>();
+  if (type->isa<TensorType>()) {
+    auto tensor_type = type->cast<TensorTypePtr>();
     MS_EXCEPTION_IF_NULL(tensor_type);
     type_id = tensor_type->element()->type_id();
   } else {
-    type_id = type_ptr->type_id();
+    type_id = type->type_id();
   }
   return type_id;
 }
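Replacing `count()` followed by `operator[]` with one `find()` does a single map lookup instead of two, and the iterator also works where the map is const (`operator[]` would not compile there). The idiom:

    #include <functional>
    #include <map>
    #include <stdexcept>

    void Dispatch(const std::map<int, std::function<void()>> &table, int key) {
      auto iter = table.find(key);  // one lookup
      if (iter == table.end()) {
        throw std::runtime_error("unsupported key");
      }
      iter->second();  // reuse the iterator; no second lookup
    }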
@@ -195,7 +195,7 @@ void GetFusedDebugInfos(const NodeDebugInfoSet &fused_debug_infos, std::vector<s
     std::vector<std::string> debug_info_vec_str;
     GetSourceLineFromDebugInfo(debug_info, &debug_info_vec_str, kSectionPrefix);
     if (!debug_info_vec_str.empty()) {
-      (void)result->insert(result->end(), debug_info_vec_str.begin(), debug_info_vec_str.end());
+      (void)result->insert(result->cend(), debug_info_vec_str.cbegin(), debug_info_vec_str.cend());
       is_empty = false;
     }
   }
@@ -215,7 +215,7 @@ void GetPrimalDebugInfos(const CNodePtr &cnode, std::vector<std::string> *result
     std::vector<std::string> debug_info_vec_str;
     GetSourceLineFromDebugInfo(primal_debug_info, &debug_info_vec_str, kSectionPrefix);
     if (!debug_info_vec_str.empty()) {
-      (void)result->insert(result->end(), debug_info_vec_str.begin(), debug_info_vec_str.end());
+      (void)result->insert(result->cend(), debug_info_vec_str.cbegin(), debug_info_vec_str.cend());
     }
   }
 }
@@ -20,8 +20,6 @@
 #include <memory>
 #include <string>
 #include <vector>
-#include <utility>
-#include <stack>
 
 #include "utils/info.h"
 #include "ir/anf.h"
@@ -19,7 +19,6 @@
 
 #include <string>
 #include <memory>
-#include <utility>
 
 namespace mindspore {
 class TraceInfo;
@@ -33,6 +32,7 @@ class TraceInfo {
  public:
   explicit TraceInfo(const DebugInfoPtr &info) : debug_info_(info) {}
   TraceInfo(const TraceInfo &other) = default;
+  TraceInfo &operator=(const TraceInfo &) = default;
   virtual ~TraceInfo() = default;
   virtual std::string name() const { return ""; }
   virtual std::string symbol() const { return ""; }
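This and the following two hunks add the matching copy-assignment operator beside an existing copy constructor, defaulted here and deleted elsewhere in the commit, keeping the special members declared as a pair per the rule of three/five. Compact pattern:

    class Pinned {
     public:
      Pinned() = default;
      Pinned(const Pinned &) = delete;             // no copy construction...
      Pinned &operator=(const Pinned &) = delete;  // ...and no copy assignment
      ~Pinned() = default;
    };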
@@ -66,6 +66,7 @@ class TracePhi : public TraceInfo {
 class TraceIfStmtTrueBranch : public TraceInfo {
  public:
   TraceIfStmtTrueBranch(const TraceIfStmtTrueBranch &) = default;
+  TraceIfStmtTrueBranch &operator=(const TraceIfStmtTrueBranch &) = default;
   explicit TraceIfStmtTrueBranch(const DebugInfoPtr &info) : TraceInfo(info) {}
   ~TraceIfStmtTrueBranch() override = default;
   MS_DECLARE_TRACE_NAME_SYMBOL("if_true", "✓");
@@ -75,6 +76,7 @@ class TraceIfStmtTrueBranch : public TraceInfo {
 class TraceIfStmtFalseBranch : public TraceInfo {
  public:
   TraceIfStmtFalseBranch(const TraceIfStmtFalseBranch &) = default;
+  TraceIfStmtFalseBranch &operator=(const TraceIfStmtFalseBranch &) = default;
   explicit TraceIfStmtFalseBranch(const DebugInfoPtr &info) : TraceInfo(info) {}
   ~TraceIfStmtFalseBranch() override = default;
   MS_DECLARE_TRACE_NAME_SYMBOL("if_false", "✗");