!38673 fix code check

Merge pull request !38673 from lianliguang/master
i-robot authored 2022-07-27 02:39:22 +00:00, committed by Gitee
commit abef6409dc
31 changed files with 111 additions and 136 deletions

View File

@@ -142,7 +142,7 @@ class MS_API Status {
  private:
   // api without std::string
   Status(enum StatusCode status_code, const std::vector<char> &status_msg);
-  Status(const enum StatusCode code, int line_of_code, const char *file_name, const std::vector<char> &extra);
+  Status(enum StatusCode code, int line_of_code, const char *file_name, const std::vector<char> &extra);
   std::vector<char> ToCString() const;
   std::vector<char> GetErrDescriptionChar() const;
   std::vector<char> SetErrDescription(const std::vector<char> &err_description);

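The constructor change above drops a top-level const from a by-value parameter. A minimal, self-contained illustration of the lint rule behind it (the function name is hypothetical, not from the MindSpore sources): top-level const is not part of a function's signature, so in a declaration it is pure noise.

#include <iostream>

// Both lines declare the same function: top-level const on a by-value
// parameter is stripped when the compiler matches declarations.
void print_code(const int code);
void print_code(int code) {  // definition of the very same function
  std::cout << code << '\n';
}

int main() {
  print_code(42);  // the argument is copied either way
  return 0;
}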
View File

@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 #include "cxx_api/model/model_impl.h"
+#include <string>
 #include "cxx_api/dlutils.h"
 namespace mindspore {

View File

@@ -197,7 +197,7 @@ class TensorStringifier {
   TensorStringifier(const T *data, size_t data_size, size_t ndim) : data_(data), data_size_(data_size), ndim_(ndim) {}
   ~TensorStringifier() = default;
-  std::string ToString(TypeId type, const ShapeVector &shape, bool use_comma) const {
+  std::string ToString(TypeId, const ShapeVector &shape, bool use_comma) const {
     constexpr auto valid =
       std::is_same<T, bool>::value || std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value ||
       std::is_same<T, int16_t>::value || std::is_same<T, int32_t>::value || std::is_same<T, int64_t>::value ||
@@ -214,7 +214,8 @@ class TensorStringifier {
     std::ostringstream ss;
     if (data_size_ == 1 && ndim_ == 0) {  // Scalar
-      OutputDataString(ss, 0, 0, 1, false, 0);
+      int max = 0;
+      OutputDataString(ss, 0, 0, 1, false, &max);
       return ss.str();
     }
@@ -919,7 +920,7 @@ static std::map<TypeId, std::vector<TensorChunk>> GroupingTensors(const TensorPt
     auto &chunk = chunks.back();
     chunk.size += tensor->DataSize();
     chunk.bytes += tensor_bytes;
-    chunk.tensors.emplace_back(tensor);
+    (void)chunk.tensors.emplace_back(tensor);
   }
   return group_info;
 }
@@ -980,7 +981,7 @@ TensorPtrList Tensor::GetFlattenedTensors(const TensorPtrList &tensors) {
   TensorPtrList result_tensors;
   for (auto &entry : chunk_map) {
     auto &chunk_tensors = entry.second;
-    result_tensors.insert(result_tensors.end(), chunk_tensors.begin(), chunk_tensors.end());
+    (void)result_tensors.insert(result_tensors.end(), chunk_tensors.begin(), chunk_tensors.end());
   }
   return result_tensors;
 }
@@ -1052,7 +1053,6 @@ const size_t CSRTensor::GetSizeAt(size_t index) const {
     return sizeof(int64_t);
   }
   MS_LOG(EXCEPTION) << "Invalid index: " << index << " for CSRTensor: " << ToString();
-  return kTypeUnknown;
 }
 TensorPtr CSRTensor::GetTensorAt(size_t index) const {
@@ -1069,7 +1069,6 @@ TensorPtr CSRTensor::GetTensorAt(size_t index) const {
     return std::make_shared<tensor::Tensor>(shape_[index - kShapeIdx], TypeIdToType(kNumberTypeInt64));
   }
   MS_LOG(EXCEPTION) << "Invalid index: " << index << " for CSRTensor: " << ToString();
-  return nullptr;
 }
 TensorPtr COOTensor::GetTensorAt(size_t index) const {

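Most hunks in this file follow two checker rules. First, calls whose result is intentionally ignored get an explicit (void) cast, so the discarded return value reads as deliberate. Second, the deleted return kTypeUnknown; and return nullptr; lines were unreachable: MS_LOG(EXCEPTION) throws, so control never falls through to them. A minimal sketch of the (void) pattern, using an illustrative container rather than the MindSpore types:

#include <vector>

int main() {
  std::vector<int> values;
  // emplace_back returns a reference to the new element (since C++17) and
  // insert returns an iterator; the (void) cast tells the static checker
  // the result is discarded on purpose rather than forgotten.
  (void)values.emplace_back(1);
  (void)values.insert(values.end(), 2);
  return values.size() == 2 ? 0 : 1;
}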
View File

@@ -174,7 +174,6 @@ class MS_CORE_API Tensor final : public MetaTensor {
   ///
   /// \param[in] tensor [Tensor] The input tensor.
   explicit Tensor(const Tensor &tensor);
-
   /// \brief Create tensor with given data type from another tensor.
   ///
   /// \param[in] tensor [Tensor] The input tensor.

View File

@@ -406,16 +406,16 @@ void MSANFModelParser::SetCNodePrimAttrAndAbstract(const mind_ir::NodeProto &nod
 abstract::AbstractTensorPtr MSANFModelParser::GetAbsTensorFromTensorProto(const mind_ir::TensorProto &tensor_proto) {
   ShapeVector shape;
   for (int i = 0; i < tensor_proto.dims_size(); ++i) {
-    shape.emplace_back(tensor_proto.dims(i));
+    (void)shape.emplace_back(tensor_proto.dims(i));
   }
   ShapeVector min_shape;
   for (int i = 0; i < tensor_proto.min_dims_size(); ++i) {
-    min_shape.emplace_back(tensor_proto.min_dims(i));
+    (void)min_shape.emplace_back(tensor_proto.min_dims(i));
   }
   ShapeVector max_shape;
   for (int i = 0; i < tensor_proto.max_dims_size(); ++i) {
-    max_shape.emplace_back(tensor_proto.max_dims(i));
+    (void)max_shape.emplace_back(tensor_proto.max_dims(i));
   }
   if (!tensor_proto.has_data_type()) {
@@ -504,36 +504,38 @@ bool MSANFModelParser::GetTensorDataFromExternal(const mind_ir::TensorProto &ten
       return false;
     }
     data = plain_data.get();
-    tenor_data_.emplace(tensor_proto.external_data().location(), std::move(plain_data));
+    (void)tenor_data_.emplace(tensor_proto.external_data().location(), std::move(plain_data));
   } else {
     // Read file
     std::basic_ifstream<char> fid(file, std::ios::in | std::ios::binary);
     if (!fid) {
       MS_LOG(EXCEPTION) << "Open file '" << file << "' failed, please check the correct of the file.";
     }
-    fid.seekg(0, std::ios_base::end);
+    (void)fid.seekg(0, std::ios_base::end);
     size_t file_size = static_cast<size_t>(fid.tellg());
     fid.clear();
-    fid.seekg(0);
+    (void)fid.seekg(0);
     auto plain_data = std::make_unique<char[]>(file_size);
     constexpr Byte is_little_endian = 1;
     constexpr int byte_order_index = 0;
-    fid.read(plain_data.get(), file_size);
+    (void)fid.read(plain_data.get(), SizeToLong(file_size));
     fid.close();
     // if byte order is not same return false
-    if ((plain_data[byte_order_index] == is_little_endian) != little_endian()) {
+    if ((plain_data[byte_order_index] == is_little_endian) ^ little_endian()) {
       MS_LOG(ERROR) << "The byte order of export MindIr device and load MindIr device is not same!";
       return false;
     }
     data = reinterpret_cast<const unsigned char *>(plain_data.get());
-    tenor_data_.emplace(tensor_proto.external_data().location(),
+    (void)tenor_data_.emplace(tensor_proto.external_data().location(),
                               std::unique_ptr<Byte[]>(reinterpret_cast<Byte *>(plain_data.release())));
   }
  }
  auto *tensor_data_buf = reinterpret_cast<uint8_t *>(tensor_info->data_c());
  MS_EXCEPTION_IF_NULL(tensor_data_buf);
-  auto ret = common::huge_memcpy(tensor_data_buf, tensor_info->data().nbytes(),
-                                 data + tensor_proto.external_data().offset(), tensor_proto.external_data().length());
+  MS_EXCEPTION_IF_NULL(data);
+  auto ret =
+    common::huge_memcpy(tensor_data_buf, tensor_info->data().nbytes(), data + tensor_proto.external_data().offset(),
+                        LongToSize(tensor_proto.external_data().length()));
  if (ret != 0) {
    MS_LOG(ERROR) << "Build parameter occur memcpy_s error.";
    return false;
@@ -1065,15 +1067,15 @@ bool MSANFModelParser::GetAttrValueForValueNodeWithType(const std::string &value
       mind_ir::TensorProto tensor_proto = attr_proto.tensors(0);
       if (tensor_proto.has_raw_data()) {
         // For real tensor.
-        ObtainValueNodeInTensorForm(value_node_name, tensor_proto);
+        (void)ObtainValueNodeInTensorForm(value_node_name, tensor_proto);
       } else {
         // For data type.
-        ObtainValueNodeInTypeForm(value_node_name, tensor_proto);
+        (void)ObtainValueNodeInTypeForm(value_node_name, tensor_proto);
       }
       break;
     }
     case mind_ir::AttributeProto_AttributeType_NONE: {
-      ObtainValueNodeInNoneForm(value_node_name);
+      (void)ObtainValueNodeInNoneForm(value_node_name);
       break;
     }
     case mind_ir::AttributeProto_AttributeType_UMONAD: {
@@ -1158,7 +1160,7 @@ bool MSANFModelParser::GetAttrValueForValueNode(const std::string &value_node_na
       break;
     }
     case FORM_PARSE_NONE: {
-      ObtainValueNodeInNoneForm(value_node_name);
+      (void)ObtainValueNodeInNoneForm(value_node_name);
       break;
     }
     case FORM_PARSE_MONAD: {
@@ -1350,10 +1352,6 @@ CNodePtr MSANFModelParser::BuildCNodeForFuncGraph(const FuncGraphPtr &outputFunc
 bool MSANFModelParser::BuildReturnForFuncGraph(const FuncGraphPtr &outputFuncGraph,
                                                const mind_ir::GraphProto &importProto) {
   MS_EXCEPTION_IF_NULL(outputFuncGraph);
-  if (importProto.output_size() <= 0 || importProto.output_size() > INT_MAX) {
-    MS_LOG(ERROR) << "importProto.output_size is: " << importProto.output_size();
-    return false;
-  }
   std::vector<AnfNodePtr> inputs;
   if (importProto.output_size() > 1) {
     inputs.push_back(NewValueNode(prim::kPrimMakeTuple));
@@ -1402,10 +1400,6 @@ bool MSANFModelParser::BuildReturnForFuncGraph(const FuncGraphPtr &outputFuncGra
 bool MSANFModelParser::ImportNodesForGraph(const FuncGraphPtr &outputFuncGraph,
                                            const mind_ir::GraphProto &importProto) {
   MS_EXCEPTION_IF_NULL(outputFuncGraph);
-  if (importProto.node_size() < 0 || importProto.node_size() > INT_MAX) {
-    MS_LOG(ERROR) << "importProto.node_size is: " << importProto.node_size();
-    return false;
-  }
   MS_LOG(DEBUG) << "The node size: " << importProto.node_size();
   CNodePtr cnode_ptr = nullptr;
   for (int i = 0; i < importProto.node_size(); ++i) {
@@ -1514,11 +1508,10 @@ bool MSANFModelParser::SetValueForTopGraphParameter(const FuncGraphPtr &topGraph
                                                     const std::map<std::string, ValuePtr> &weights) {
   size_t fv_param_count = 0;
   auto parameters = topGraph->parameters();
-  for (int i = parameters.size() - 1; i >= 0; --i) {
-    size_t index = IntToSize(i);
-    auto parameter = parameters[index]->cast<ParameterPtr>();
+  for (int64_t i = SizeToLong(parameters.size()) - 1; i >= 0; --i) {
+    auto parameter = parameters[i]->cast<ParameterPtr>();
     if (parameter == nullptr) {
-      MS_LOG(ERROR) << "AnfNode " << parameters[index]->DebugString() << " should be Parameter.";
+      MS_LOG(ERROR) << "AnfNode " << parameters[i]->DebugString() << " should be Parameter.";
       return false;
     }
     auto type = parameter->Type();
@@ -1633,7 +1626,7 @@ const LayoutMap MSANFModelParser::ParseLayout(const mind_ir::ModelProto &model_p
   }
   std::vector<int64_t> tensor_map;
   for (int num = 0; num < layout_proto.tensor_map_int_size(); ++num) {
-    tensor_map.emplace_back(layout_proto.tensor_map_int(num));
+    (void)tensor_map.emplace_back(layout_proto.tensor_map_int(num));
   }
   std::vector<int64_t> slice_shape;
   for (int num = 0; num < layout_proto.slice_shape_int_size(); ++num) {
@@ -1759,7 +1752,7 @@ abstract::AbstractBasePtr MSANFModelParser::BuildAbstractFunction(const mind_ir:
       MS_LOG(WARNING) << "Can't get the abstract of function union closure: " << item_proto.DebugString();
       return nullptr;
     }
-    func_list.emplace_back(item_abstract->cast<abstract::AbstractFuncAtomPtr>());
+    (void)func_list.emplace_back(item_abstract->cast<abstract::AbstractFuncAtomPtr>());
   }
   return std::make_shared<abstract::AbstractFuncUnion>(func_list);
 }
@@ -1777,7 +1770,7 @@ void MSANFModelParser::CorrectFuncGraph(const FuncGraphPtr &root) {
   auto valid =
     std::all_of(inputs.begin(), inputs.end(), [](const AnfNodePtr &arg) -> bool { return arg->abstract() != nullptr; });
   if (valid) {
-    ValidMindir(root);
+    (void)ValidMindir(root);
   } else {
     MS_LOG(INFO) << "There are some nullptr of abstract in the top function graph parameters." << root->DumpText();
   }

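Two notes on this file. The deleted output_size()/node_size() range checks were tautological: protobuf size accessors return int, which can never exceed INT_MAX. And the byte-order test changes != to ^; for two bool operands the forms are equivalent, both true exactly when the flags differ. A small sketch of the kind of runtime probe a little_endian() helper typically relies on (this is an assumption for illustration, not the actual MindSpore implementation):

#include <cstdint>
#include <cstring>
#include <iostream>

// Hypothetical probe: inspect the first byte of a known multi-byte integer
// to learn the host byte order.
bool little_endian() {
  const std::uint16_t probe = 1;
  std::uint8_t first_byte = 0;
  std::memcpy(&first_byte, &probe, 1);
  return first_byte == 1;  // low byte stored first means little-endian
}

int main() {
  const bool file_flag = true;  // pretend the file header says little-endian
  // For two bools, (a != b) and (a ^ b) are both true exactly when they differ.
  if (file_flag ^ little_endian()) {
    std::cout << "byte order mismatch\n";
  }
  return 0;
}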
View File

@@ -64,7 +64,7 @@ using LayoutMap = std::map<string, LayoutPtr>;
 class MSANFModelParser {
  public:
-  MSANFModelParser() : producer_name_(""), model_version_(""), ir_version_("") {}
+  MSANFModelParser() = default;
   ~MSANFModelParser() = default;
   static void LoadTensorMapClear() { load_tensor_map_.clear(); }
@@ -92,7 +92,7 @@ class MSANFModelParser {
   bool BuildAttrForFuncGraph(const FuncGraphPtr &outputFuncGraph, const mind_ir::GraphProto &importProto);
   bool ImportParametersForGraph(const FuncGraphPtr &outputFuncGraph, const mind_ir::GraphProto &importProto);
   bool ImportNodesForGraph(const FuncGraphPtr &outputFuncGraph, const mind_ir::GraphProto &importProto);
-  bool BuildParameterForFuncGraph(const ParameterPtr &node, const mind_ir::TensorProto &tensor_proto);
+  bool BuildParameterForFuncGraph(const ParameterPtr &node, const mind_ir::TensorProto &parameter_proto);
   bool SetValueForTopGraphParameter(const FuncGraphPtr &topGraph, const std::map<std::string, ValuePtr> &weights);
   bool GetTensorDataFromExternal(const mind_ir::TensorProto &tensor_proto, const tensor::TensorPtr &tensor_info);
   bool BuildInputForFuncGraph(const ParameterPtr &node, const mind_ir::ValueInfoProto &value_proto);
@@ -116,13 +116,13 @@ class MSANFModelParser {
   void SetCNodePrimAttrAndAbstract(const mind_ir::NodeProto &node_proto, const CNodePtr &cnode_ptr);
   bool ObtainValueNodeInTensorForm(const string &value_node_name, const mind_ir::TensorProto &attr_tensor);
   bool ObtainValueNodeInTupleTensorForm(const string &value_node_name, const mind_ir::AttributeProto &attr_proto);
-  bool GetAttrValueForValueNode(const std::string &value_node_name, const mind_ir::AttributeProto &attr_tensor);
+  bool GetAttrValueForValueNode(const std::string &value_node_name, const mind_ir::AttributeProto &attr_proto);
   bool GetAttrValueForValueNodeWithType(const std::string &value_node_name, const mind_ir::AttributeProto &attr_proto);
   bool ObtainValueNodeInTypeForm(const string &value_node_name, const mind_ir::TensorProto &attr_tensor);
   bool ObtainValueNodeInNoneForm(const std::string &value_node_name);
   bool ObtainValueNodeInMonadForm(const std::string &value_node_name, const mind_ir::AttributeProto &attr_proto);
   ValuePtr ObtainValueInSequenceForm(const mind_ir::AttributeProto &attr_proto);
-  bool little_endian() { return little_endian_; }
+  bool little_endian() const { return little_endian_; }
   mindspore::HashMap<std::string, abstract::AbstractBasePtr> GetAbstractForNode(
     const mind_ir::AttributeProto &attr_proto);
   AnfNodePtr GetAnfNode(const std::string &node_name);
@@ -139,7 +139,7 @@ class MSANFModelParser {
   mindspore::HashMap<std::string, AnfNodePtr> anfnode_build_map_;
   std::string mindir_path_;
   const unsigned char *mindir_dec_key_{nullptr};
-  size_t mindir_key_size_;
+  size_t mindir_key_size_{0};
   std::string mindir_dec_mode_;
   bool little_endian_ = common::IsLittleByteOrder();
   std::map<std::string, std::unique_ptr<Byte[]>> tenor_data_;

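The header change replaces a constructor that only assigned empty strings with = default, and gives mindir_key_size_ an in-class initializer so it can never be read uninitialized. A minimal sketch of the idiom (class and member names are illustrative, not the MindSpore declarations):

#include <cstddef>
#include <string>

class Parser {
 public:
  Parser() = default;  // nothing to do: members initialize where declared

 private:
  std::string producer_name_;  // std::string default-constructs to ""
  std::size_t key_size_{0};    // brace initializer replaces a ctor assignment
  bool little_endian_ = true;  // equals-style initializer works the same way
};

int main() {
  Parser parser;  // every member has a well-defined value here
  (void)parser;
  return 0;
}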
View File

@@ -50,7 +50,7 @@ class MindIREngine {
   void InferParameter(const AnfNodePtr &node);
   void InferValueNode(const AnfNodePtr &node);
   void InferCNode(const AnfNodePtr &node);
-  void EvalAbstractFunction(const abstract::AbstractFuncAtomPtr &abstractFunc, const CNodePtr &node,
+  void EvalAbstractFunction(const abstract::AbstractFuncAtomPtr &func, const CNodePtr &node,
                             const AbstractBasePtrListPtr &args);
   void EvalPrimitiveAbastract(const abstract::PrimitiveAbstractClosurePtr &func, const CNodePtr &node,
                               const AbstractBasePtrListPtr &args);

View File

@@ -61,8 +61,8 @@ bool get_all_files(const std::string &dir_in, std::vector<std::string> *files) {
       return false;
     }
     if (S_ISDIR(st.st_mode)) {
-      ret = static_cast<int>(get_all_files(name, files));
-      if (!ret) {
+      ret = get_all_files(name, files);
+      if (ret) {
         MS_LOG(ERROR) << "Get files failed, ret is : " << ret;
         closedir(open_dir);
         return false;
@@ -182,7 +182,7 @@ std::vector<std::string> MindIRLoader::LoadPreprocess(const std::string &file_na
   return map_jsons;
 }
-std::vector<FuncGraphPtr> MindIRLoader::LoadMindIRs(std::vector<std::string> file_names) {
+std::vector<FuncGraphPtr> MindIRLoader::LoadMindIRs(const std::vector<std::string> &file_names) {
   std::vector<FuncGraphPtr> funcgraph_vec;
   MS_LOG(DEBUG) << "Load multiple MindIR files.";
   for (const auto &file_name : file_names) {
@@ -245,12 +245,12 @@ FuncGraphPtr MindIRLoader::LoadMindIR(const std::string &file_name) {
     return nullptr;
   }
   // Load parameter into graph
-  if (endsWith(std::string(abs_path_buff), "_graph.mindir") && origin_model.graph().parameter_size() == 0) {
+  if (endsWith(std::string(abs_path_buff), "_graph.mindir") && (origin_model.graph().parameter_size() == 0)) {
     if (strlen(abs_path_buff) < strlen("graph.mindir")) {
       MS_LOG(ERROR) << "The abs_path_buff length is less than 'graph.mindir'.";
       return nullptr;
     }
-    int path_len = SizeToInt(strlen(abs_path_buff) - strlen("graph.mindir"));
+    size_t path_len = strlen(abs_path_buff) - strlen("graph.mindir");
     string var_path = std::string(abs_path_buff).substr(0, path_len);
     var_path += "variables";
     std::ifstream ifs(var_path);
@@ -270,10 +270,6 @@ FuncGraphPtr MindIRLoader::LoadMindIR(const std::string &file_name) {
       return nullptr;
     }
-    if (param_graph.parameter_size() < 0 || param_graph.parameter_size() > INT_MAX) {
-      MS_LOG(ERROR) << "param_graph.parameter_size() is : " << param_graph.parameter_size();
-      return nullptr;
-    }
     for (int param_index = 0; param_index < param_graph.parameter_size(); param_index++) {
       mind_ir::TensorProto *param_proto = mod_graph->add_parameter();
       param_proto->set_name(param_graph.parameter(param_index).name());

View File

@@ -42,7 +42,7 @@ class MS_CORE_API MindIRLoader {
   void InitModelParser(MSANFModelParser *model_parser);
   FuncGraphPtr LoadMindIR(const void *buffer, const size_t &size);
   FuncGraphPtr LoadMindIR(const std::string &file_name);
-  std::vector<FuncGraphPtr> LoadMindIRs(const std::vector<std::string> file_names);
+  std::vector<FuncGraphPtr> LoadMindIRs(const std::vector<std::string> &file_names);
   std::vector<std::string> LoadPreprocess(const std::string &file_name);
  private:

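LoadMindIRs previously took its std::vector<std::string> by value, copying every file name on each call; the declaration and definition now take a const reference. A small sketch of the difference (function names are illustrative):

#include <cstddef>
#include <string>
#include <vector>

// By value: the vector and every string in it are copied on each call.
std::size_t count_by_value(std::vector<std::string> names) { return names.size(); }

// By const reference: no copy, and the callee still cannot modify the input.
std::size_t count_by_ref(const std::vector<std::string> &names) { return names.size(); }

int main() {
  const std::vector<std::string> files = {"a_graph.mindir", "b_graph.mindir"};
  return count_by_value(files) == count_by_ref(files) ? 0 : 1;
}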
View File

@@ -26,8 +26,6 @@
 namespace mindspore {
 namespace {
-const PrimitiveSet follow_first_input_prims = {prim::kPrimDepend, prim::kPrimLoad};
-
 class AbstractMutexManager {
  public:
   static AbstractMutexManager &GetInstance() {
@@ -98,8 +96,7 @@ AnfNodePtr NewCustomActorNode(const CustomActorInfoPtr &actor_info, const FuncGr
   }
 }  // namespace
-AbstractScope::AbstractScope(std::recursive_mutex *mu) {
-  mu_ = mu;
+AbstractScope::AbstractScope(std::recursive_mutex *mu) : mu_(mu) {
   if (mu_ != nullptr) {
     mu_->lock();
   }
@@ -397,6 +394,7 @@ int64_t AnfUtils::GetIntValue(const AnfNodePtr &anf_node) {
 std::pair<AnfNodePtr, size_t> AnfUtils::VisitKernel(const AnfNodePtr &anf_node, size_t index) {
   MS_EXCEPTION_IF_NULL(anf_node);
+  const PrimitiveSet follow_first_input_prims = {prim::kPrimDepend, prim::kPrimLoad};
   if (anf_node->isa<ValueNode>()) {
     return std::make_pair(anf_node, 0);
   } else if (anf_node->isa<Parameter>()) {

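The follow_first_input_prims set moves from an anonymous-namespace global into the only function that uses it, sidestepping cross-translation-unit construction and destruction order issues for non-trivial globals. A minimal sketch of the pattern with deliberately simplified types:

#include <set>
#include <string>

// Before: a namespace-scope object is constructed before main() and destroyed
// after it, in an order that is hard to reason about across translation units.
// After: the object lives only inside the function that consults it.
bool FollowsFirstInput(const std::string &prim_name) {
  const std::set<std::string> follow_first_input_prims = {"Depend", "Load"};
  return follow_first_input_prims.count(prim_name) > 0;
}

int main() { return FollowsFirstInput("Depend") ? 0 : 1; }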
View File

@@ -535,10 +535,10 @@ TypePtr CheckAndConvertUtils::CheckTensorTypeSame(const std::map<std::string, Ty
   std::set<string> order_set;
   for (auto const &valid_type : check_list) {
     if (valid_type->isa<TensorType>()) {
-      order_set.emplace(valid_type->ToString());
+      (void)order_set.emplace(valid_type->ToString());
       break;
     } else {
-      order_set.emplace("Tensor[" + valid_type->ToString() + "]");
+      (void)order_set.emplace("Tensor[" + valid_type->ToString() + "]");
     }
   }
   for (auto const &error_item : order_set) {
@@ -592,7 +592,6 @@ TypePtr CheckAndConvertUtils::CheckSparseTensorTypeValid(const std::string &type
     MS_EXCEPTION(TypeError) << "For Primitive[" << prim_name << "], the input argument[" << type_name
                             << "] cast to SparseTensorTypePtr failed! Get type : " << type->ToString() << ".";
   }
-  return nullptr;
 }
 ShapeVector CheckAndConvertUtils::CheckTensorIntValue(const std::string &type_name, const ValuePtr &value,
@@ -645,16 +644,16 @@ TypePtr CheckAndConvertUtils::CheckTensorSubClass(const string &type_name, const
   if (is_mix) {
     for (const auto &item : template_types) {
-      order_set.emplace(item->ToString());
+      (void)order_set.emplace(item->ToString());
     }
   }
   for (const auto &item : template_types) {
     if (item->isa<TensorType>()) {
-      order_set.emplace(item->ToString());
+      (void)order_set.emplace(item->ToString());
       continue;
     }
-    order_set.emplace("Tensor[" + item->ToString() + "]");
+    (void)order_set.emplace("Tensor[" + item->ToString() + "]");
   }
   for (const auto &item : order_set) {
@@ -692,7 +691,7 @@ TypePtr CheckAndConvertUtils::CheckSubClass(const std::string &type_name, const
 TypePtr CheckAndConvertUtils::CheckScalarOrTensorTypesSame(const std::map<std::string, TypePtr> &args,
                                                            const std::set<TypePtr> &valid_values,
-                                                           const std::string &prim_name, const bool allow_mix) {
+                                                           const std::string &prim_name, bool allow_mix) {
   (void)_CheckTypeSame(args, prim_name, allow_mix);
   return CheckTensorSubClass(args.begin()->first, args.begin()->second, valid_values, prim_name, true);
 }

View File

@@ -26,7 +26,6 @@
 #include "abstract/param_validator.h"
 #include "base/base.h"
 #include "ir/anf.h"
-#include "ir/dtype/type_id.h"
 #include "include/api/format.h"
 #include "utils/log_adapter.h"
 #if __has_include("include/mindapi/base/types.h")
@@ -301,12 +300,12 @@ class MS_CORE_API CheckAndConvertUtils {
   static void CheckSummaryParam(const AbstractBasePtr &name, const AbstractBasePtr &value,
                                 const std::string &class_name);
   static void CheckMode(const std::string &class_name);
-  static std::vector<int64_t> CheckIntOrTupleInt(const std::string &prim_name, const ValuePtr &attr,
-                                                 const std::string &arg_name);
-  static std::vector<int64_t> CheckTupleInt(const std::string &prim_name, const ValuePtr &attr,
-                                            const std::string &arg_name);
-  static std::vector<int64_t> CheckListInt(const std::string &prim_name, const ValuePtr &attr,
-                                           const std::string &arg_name);
+  static std::vector<int64_t> CheckIntOrTupleInt(const std::string &arg_name, const ValuePtr &attr,
+                                                 const std::string &prim_name);
+  static std::vector<int64_t> CheckTupleInt(const std::string &arg_name, const ValuePtr &attr,
+                                            const std::string &prim_name);
+  static std::vector<int64_t> CheckListInt(const std::string &arg_name, const ValuePtr &attr,
+                                           const std::string &prim_name);
   static void CheckMinMaxShape(const ShapeVector &shape, ShapeVector *min_shape, ShapeVector *max_shape);
   static int64_t GetAndCheckFormat(const ValuePtr &value);
   static size_t GetRemoveMonadAbsNum(const AbstractBasePtrList &abs_list);

View File

@@ -77,7 +77,7 @@ class CompactSet {
   T pop() {
     T e = std::move(data_.front());
-    data_.erase(data_.begin());
+    (void)data_.erase(data_.begin());
     return e;
   }

View File

@@ -15,10 +15,8 @@
  */
 #include "utils/info.h"
-#include <utility>
 #include <fstream>
 #include <sstream>
-#include <climits>
 #include "ir/anf.h"
 #include "ir/graph_utils.h"
 #include "ir/func_graph.h"

View File

@@ -19,7 +19,6 @@
 #include <iostream>
 #include <memory>
 #include <string>
-#include <vector>
 #include "utils/hash_map.h"
 #include "ir/anf.h"
@@ -27,7 +26,8 @@ namespace mindspore {
 namespace label_manage {
 enum class TraceLabelType { kShortSymbol, kFullName, kWithUniqueId };
 MS_CORE_API TraceLabelType GetGlobalTraceLabelType();
-MS_CORE_API std::string Label(const DebugInfoPtr &debug_info, TraceLabelType trace_type = TraceLabelType::kShortSymbol);
+MS_CORE_API std::string Label(const DebugInfoPtr &debug_info,
+                              TraceLabelType trace_label = TraceLabelType::kShortSymbol);
 }  // namespace label_manage
 }  // namespace mindspore

View File

@@ -17,8 +17,8 @@
 #ifndef MINDSPORE_CORE_UTILS_LOG_ADAPTER_H_
 #define MINDSPORE_CORE_UTILS_LOG_ADAPTER_H_
-#include <stdarg.h>
-#include <stdint.h>
+#include <cstdarg>
+#include <cstdint>
 #include <string>
 #include <sstream>
 #include <memory>
@@ -45,7 +45,6 @@
 static constexpr size_t GetRelPathPos() noexcept {
   return sizeof(__FILE__) > sizeof(LOG_HDR_FILE_REL_PATH) ? sizeof(__FILE__) - sizeof(LOG_HDR_FILE_REL_PATH) : 0;
 }
-
 namespace mindspore {
 /// \brief The handler map for ACL.
 MS_CORE_API extern std::map<void **, std::thread *> acl_handle_map;
@@ -231,11 +230,7 @@ MS_EXPORT extern enum MsLogLevel this_thread_max_log_level;
 MS_EXPORT extern thread_local enum MsLogLevel this_thread_max_log_level;
 class TryCatchGuard {
  public:
-  TryCatchGuard() {
-    origin_log_level_ = this_thread_max_log_level;
-    this_thread_max_log_level = MsLogLevel::WARNING;
-  }
+  TryCatchGuard() : origin_log_level_(this_thread_max_log_level) { this_thread_max_log_level = MsLogLevel::WARNING; }
   ~TryCatchGuard() { this_thread_max_log_level = origin_log_level_; }
  private:
@@ -330,11 +325,11 @@ inline bool IS_OUTPUT_ON(enum MsLogLevel level) noexcept(true) {
     } \
   } while (0)
 #define MS_EXCEPTION_IF_ZERO(name, value) \
   do { \
-    if (value == 0) { \
-      MS_LOG(EXCEPTION) << ": The " << name << " is zero."; \
+    if ((value) == 0) { \
+      MS_LOG(EXCEPTION) << ": The " << (name) << " is zero."; \
     } \
   } while (0)
 #define MS_ERROR_IF_NULL(ptr) \

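MS_EXCEPTION_IF_ZERO now parenthesizes its arguments; without the parentheses, an expression argument is spliced into the macro body textually and can parse with the wrong precedence. A standalone demonstration of the failure mode (these macros are illustrative stand-ins, not the MindSpore ones):

#include <cstdio>

// The first macro splices its argument in unparenthesized; the second
// parenthesizes it, as the fix above does.
#define CHECK_ZERO_BAD(value)  \
  do {                         \
    if (value == 0) {          \
      std::puts("zero (bad)"); \
    }                          \
  } while (0)

#define CHECK_ZERO_GOOD(value)  \
  do {                          \
    if ((value) == 0) {         \
      std::puts("zero (good)"); \
    }                           \
  } while (0)

int main() {
  const int a = 2, mask = 1;  // (a & mask) == 0, so both checks should fire
  // Expands to: if (a & mask == 0) -> parsed as a & (mask == 0), i.e. 2 & 0,
  // which is false, so the zero case is silently missed.
  CHECK_ZERO_BAD(a & mask);
  // Expands to: if ((a & mask) == 0) -> 0 == 0, prints as intended.
  CHECK_ZERO_GOOD(a & mask);
  return 0;
}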
View File

@@ -17,7 +17,6 @@
 #ifndef MINDSPORE_CORE_UTILS_MISC_H_
 #define MINDSPORE_CORE_UTILS_MISC_H_
-#include <list>
 #include <memory>
 #include <string>
 #include <sstream>

View File

@@ -15,11 +15,11 @@
  */
 #include "utils/ms_context.h"
+#include <cstdlib>
 #include <thread>
 #include <atomic>
 #include <fstream>
 #include <algorithm>
-#include <utility>
 #include "ir/tensor.h"
 #include "utils/ms_utils.h"

View File

@@ -20,14 +20,11 @@
 #include <memory>
 #include <map>
 #include <set>
-#include <vector>
 #include <string>
-#include <utility>
 #include <functional>
 #include "utils/log_adapter.h"
 #include "utils/ms_utils.h"
 #ifdef ENABLE_TDTQUE
-#include "pybind11/pybind11.h"
 #include "minddata/dataset/engine/device_queue_impl/tdt/tdt_handle.h"
 using mindspore::dataset::TdtHandle;
 #endif
@@ -146,7 +143,7 @@ enum MsCtxParam : unsigned {
 class MS_CORE_API MsContext {
  public:
-  MsContext(const std::string &backend_policy, const std::string &target);
+  MsContext(const std::string &policy, const std::string &target);
   ~MsContext() = default;
   MsContext(const MsContext &) = delete;
   MsContext &operator=(const MsContext &) = delete;

View File

@@ -98,9 +98,9 @@ class OrderedSet {
   // insert an element to the end of OrderedSet.
   std::pair<iterator, bool> insert(const element_type &e) { return insert(ordered_data_.end(), e); }
-  void push_back(const element_type &e) { (void)insert(ordered_data_.end(), e); }
-  void push_front(const element_type &e) { (void)insert(ordered_data_.begin(), e); }
+  void push_back(const element_type &e) const { (void)insert(ordered_data_.end(), e); }
+  void push_front(const element_type &e) const { (void)insert(ordered_data_.begin(), e); }
   // Remove an element, if removed return true, otherwise return false
   bool erase(const element_type &e) {
@@ -428,7 +428,7 @@ class OrderedSet<std::shared_ptr<T>> {
   const element_type &front() const { return ordered_data_.front(); }
   // Return true if there are no common elements.
-  bool is_disjoint(const OrderedSet &other) {
+  bool is_disjoint(const OrderedSet &other) const {
     return std::all_of(begin(), end(), [&other](const auto &e) { return !other.contains(e); });
   }

View File

@@ -34,7 +34,9 @@ std::ostream &operator<<(std::ostream &out, const std::vector<T> &v) {
   size_t last = v.size() - 1;
   for (size_t i = 0; i < v.size(); ++i) {
     out << v[i];
-    if (i != last) out << ", ";
+    if (i != last) {
+      out << ", ";
+    }
   }
   out << "]";
   return out;

View File

@@ -192,9 +192,9 @@ void Profile::Pop(void) noexcept {
   ctx_ptr_ = ctx_ptr_->parent_;
 }
-ProfContext::ProfContext(const std::string &name, ProfileBase *const prof) : name_(name), prof_(prof) {
+ProfContext::ProfContext(const std::string &name, ProfileBase *const prof)
+    : name_(name), prof_(prof), time_info_(nullptr) {
   // Initialize a subcontext.
-  time_info_ = nullptr;
   if (prof == nullptr || IsTopContext()) {
     parent_ = nullptr;
   } else {

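ProfContext now sets time_info_ in its member initializer list instead of assigning it in the constructor body; initializer lists initialize each member exactly once, in declaration order, and are the only option for const and reference members. A compact sketch with an illustrative class shape:

#include <string>

class ProfCtx {
 public:
  // All members are initialized directly, before the constructor body runs;
  // assigning in the body would default-construct name_ first, then overwrite it.
  ProfCtx(const std::string &name, void *prof)
      : name_(name), prof_(prof), time_info_(nullptr) {}

 private:
  std::string name_;
  void *prof_;
  void *time_info_;
};

int main() {
  ProfCtx ctx("step", nullptr);
  return 0;
}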
View File

@@ -35,6 +35,7 @@ class ProfileBase;
 struct TimeInfo {
   explicit TimeInfo(double time = -1.0) : time_(time), dict_(nullptr), actionNum_(0) {}
   TimeInfo(const TimeInfo &) = delete;
+  TimeInfo &operator=(const TimeInfo &) = delete;
   ~TimeInfo();
   double time_;
@@ -49,7 +50,7 @@ class MS_CORE_API ProfContext {
   friend class ProfTransaction;
  public:
-  ProfContext(const std::string &name, ProfileBase *prof);
+  ProfContext(const std::string &name, ProfileBase *const prof);
   ~ProfContext();
   ProfContext(const ProfContext &) = delete;
@@ -103,6 +104,7 @@ class MS_CORE_API ProfTransaction {
   explicit ProfTransaction(const ProfileBase *prof);
   explicit ProfTransaction(ProfContext *const ctx) : ctx_(ctx) {}
   ProfTransaction(const ProfTransaction &) = delete;
+  ProfTransaction &operator=(const ProfTransaction &) = delete;
   ~ProfTransaction();
   template <class Function>
@@ -146,7 +148,7 @@ class MS_CORE_API DumpTime {
   DumpTime &operator=(const DumpTime &) = delete;
   static DumpTime &GetInstance();
   void set_file_path(const std::string &save_path) { file_path_ = save_path; }
-  void Record(const std::string &name, const double time, const bool is_start);
+  void Record(const std::string &step_name, const double time, const bool is_start);
   void Save();
  private:
@@ -157,10 +159,7 @@ class MS_CORE_API DumpTime {
 };
 struct TimeStat {
-  TimeStat() {
-    time_ = 0.0;
-    count_ = 0;
-  }
+  TimeStat() : time_(0.0), count_(0) {}
   ~TimeStat() = default;
   void operator+=(double t) {

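This header adds deleted copy-assignment operators next to the already-deleted copy constructors. That closes a real gap: when only the copy constructor is user-declared, the compiler still generates a (deprecated) copy assignment operator, so assignment would silently compile. A minimal sketch:

struct NonCopyable {
  NonCopyable() = default;
  NonCopyable(const NonCopyable &) = delete;             // no copy construction
  NonCopyable &operator=(const NonCopyable &) = delete;  // ...and no copy assignment
};

int main() {
  NonCopyable a;
  NonCopyable b;
  // NonCopyable c = a;  // error: copy constructor is deleted
  // b = a;              // error only because operator= is deleted too
  (void)a;
  (void)b;
  return 0;
}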
View File

@@ -42,7 +42,7 @@ template <class FuncType>
 class Signal {
  public:
   template <class... Args>
-  void operator()(Args &&... args) {
+  void operator()(Args &&... args) const {
     for (auto &slot : slots_) {
       if (slot->callback != nullptr) {
         slot->callback(std::forward<Args>(args)...);
@@ -66,4 +66,4 @@ class Signal {
 };
 }  // namespace mindspore
-#endif  // MINDSPORE_CORE_UTILS_EVENT_H_
+#endif  // MINDSPORE_CORE_UTILS_SIGNAL_H_

View File

@@ -97,7 +97,7 @@ class Platform {
 // check the null point
 #define MS_EXCEPT_CHECK_NULL(value) \
   do { \
-    if (value == nullptr) { \
+    if ((value) == nullptr) { \
       MS_LOG(EXCEPTION) << "The value is null."; \
     } \
   } while (0)

View File

@@ -17,7 +17,7 @@
 #ifndef MINDSPORE_CORE_UTILS_SYSTEM_CRC32C_H_
 #define MINDSPORE_CORE_UTILS_SYSTEM_CRC32C_H_
-#include <stddef.h>
+#include <cstddef>
 #include <cstdint>
 #include "utils/system/base.h"
 #include "utils/system/env.h"
@@ -26,7 +26,7 @@
 namespace mindspore {
 namespace system {
 // Align n to (1 << m) byte boundary
-#define MEM_ALIGN(n, m) ((n + ((1 << (m)) - 1)) & ~((1 << (m)) - 1))
+#define MEM_ALIGN(n, m) (((n) + ((1 << (m)) - 1)) & (~((1 << (m)) - 1)))
 // Masked for crc.
 static constexpr uint32 kMaskDelta = 0xa282ead8ul;

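The MEM_ALIGN fix is the same macro-hygiene rule applied to arithmetic: every use of n and m gets its own parentheses. A quick worked check of the rounding formula, using the fixed definition:

#include <cassert>

// Same formula as the fixed macro: round n up to a multiple of (1 << m) by
// adding (alignment - 1), then clearing the low m bits with the inverted mask.
#define MEM_ALIGN(n, m) (((n) + ((1 << (m)) - 1)) & (~((1 << (m)) - 1)))

int main() {
  assert(MEM_ALIGN(5, 3) == 8);    // 5 rounded up to the next multiple of 8
  assert(MEM_ALIGN(16, 4) == 16);  // already-aligned values are unchanged
  int x = 4;
  // With the old, unparenthesized n, MEM_ALIGN(x << 1, 2) would splice in as
  // (x << 1 + 3) & ..., i.e. x << 4, because '+' binds tighter than '<<'.
  assert(MEM_ALIGN(x << 1, 2) == 8);  // (4 << 1) rounded to a multiple of 4
  return 0;
}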
View File

@@ -17,8 +17,8 @@
 #ifndef MINDSPORE_CORE_UTILS_SYSTEM_FILE_SYSTEM_H_
 #define MINDSPORE_CORE_UTILS_SYSTEM_FILE_SYSTEM_H_
-#include <errno.h>
 #include <sys/param.h>
+#include <cerrno>
 #include <cstdint>
 #include <cstdlib>
 #include <cstdio>

View File

@@ -19,9 +19,9 @@
 #include <map>
 #include <functional>
 namespace mindspore {
-tensor::TensorPtr TensorConstructUtils::CreateZerosTensor(const TypePtr &type_ptr, const std::vector<int64_t> &shape) {
-  MS_EXCEPTION_IF_NULL(type_ptr);
-  auto type_id = ExtractTypeId(type_ptr);
+tensor::TensorPtr TensorConstructUtils::CreateZerosTensor(const TypePtr &type, const std::vector<int64_t> &shape) {
+  MS_EXCEPTION_IF_NULL(type);
+  auto type_id = ExtractTypeId(type);
   tensor::TensorPtr tensor = std::make_shared<tensor::Tensor>(type_id, shape);
   size_t mem_size = IntToSize(tensor->ElementsNum());
   auto tensor_data = tensor->data_c();
@@ -32,9 +32,9 @@ tensor::TensorPtr TensorConstructUtils::CreateZerosTensor(const TypePtr &type_pt
   return tensor;
 }
-tensor::TensorPtr TensorConstructUtils::CreateOnesTensor(const TypePtr &type_ptr, const std::vector<int64_t> &shape) {
-  MS_EXCEPTION_IF_NULL(type_ptr);
-  auto type_id = ExtractTypeId(type_ptr);
+tensor::TensorPtr TensorConstructUtils::CreateOnesTensor(const TypePtr &type, const std::vector<int64_t> &shape) {
+  MS_EXCEPTION_IF_NULL(type);
+  auto type_id = ExtractTypeId(type);
   tensor::TensorPtr tensor = std::make_shared<tensor::Tensor>(type_id, shape);
   const size_t &mem_size = IntToSize(tensor->ElementsNum());
   auto tensor_data = tensor->data_c();
@@ -65,30 +65,31 @@ tensor::TensorPtr TensorConstructUtils::CreateOnesTensor(const TypePtr &type_pt
   };
   const auto &tensor_type = tensor->data_type();
-  if (!type_dict.count(tensor_type)) {
+  auto iter = type_dict.find(tensor_type);
+  if (iter == type_dict.end()) {
     MS_LOG(EXCEPTION) << "unsupported data type: " << tensor_type;
   }
-  type_dict[tensor_type]();
+  iter->second();
   return tensor;
 }
-tensor::TensorPtr TensorConstructUtils::CreateTensor(const TypePtr &type_ptr, const std::vector<int64_t> &shape,
+tensor::TensorPtr TensorConstructUtils::CreateTensor(const TypePtr &type, const std::vector<int64_t> &shape,
                                                      void *data) {
-  MS_EXCEPTION_IF_NULL(type_ptr);
-  auto type_id = ExtractTypeId(type_ptr);
+  MS_EXCEPTION_IF_NULL(type);
+  auto type_id = ExtractTypeId(type);
   tensor::TensorPtr tensor = std::make_shared<tensor::Tensor>(type_id, shape, data, type_id);
   return tensor;
 }
-TypeId TensorConstructUtils::ExtractTypeId(const TypePtr &type_ptr) {
-  MS_EXCEPTION_IF_NULL(type_ptr);
+TypeId TensorConstructUtils::ExtractTypeId(const TypePtr &type) {
+  MS_EXCEPTION_IF_NULL(type);
   TypeId type_id;
-  if (type_ptr->isa<TensorType>()) {
-    auto tensor_type = type_ptr->cast<TensorTypePtr>();
+  if (type->isa<TensorType>()) {
+    auto tensor_type = type->cast<TensorTypePtr>();
     MS_EXCEPTION_IF_NULL(tensor_type);
     type_id = tensor_type->element()->type_id();
   } else {
-    type_id = type_ptr->type_id();
+    type_id = type->type_id();
   }
   return type_id;
 }

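The CreateOnesTensor change replaces a count() check followed by operator[] with a single find(): one lookup instead of two, and no risk of operator[] default-inserting a missing key. A small sketch of the same refactor (the map contents are illustrative):

#include <functional>
#include <iostream>
#include <map>
#include <string>

int main() {
  std::map<std::string, std::function<void()>> handlers = {
      {"ones", [] { std::cout << "fill with ones\n"; }},
  };

  const std::string key = "ones";
  // One lookup: find() locates the entry (or end()), and the iterator is
  // reused to call the handler; count() + operator[] would search twice.
  auto iter = handlers.find(key);
  if (iter == handlers.end()) {
    std::cerr << "unsupported key: " << key << '\n';
    return 1;
  }
  iter->second();
  return 0;
}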
View File

@@ -195,7 +195,7 @@ void GetFusedDebugInfos(const NodeDebugInfoSet &fused_debug_infos, std::vector<s
     std::vector<std::string> debug_info_vec_str;
     GetSourceLineFromDebugInfo(debug_info, &debug_info_vec_str, kSectionPrefix);
     if (!debug_info_vec_str.empty()) {
-      (void)result->insert(result->end(), debug_info_vec_str.begin(), debug_info_vec_str.end());
+      (void)result->insert(result->cend(), debug_info_vec_str.cbegin(), debug_info_vec_str.cend());
       is_empty = false;
     }
   }
@@ -215,7 +215,7 @@ void GetPrimalDebugInfos(const CNodePtr &cnode, std::vector<std::string> *result
     std::vector<std::string> debug_info_vec_str;
     GetSourceLineFromDebugInfo(primal_debug_info, &debug_info_vec_str, kSectionPrefix);
     if (!debug_info_vec_str.empty()) {
-      (void)result->insert(result->end(), debug_info_vec_str.begin(), debug_info_vec_str.end());
+      (void)result->insert(result->cend(), debug_info_vec_str.cbegin(), debug_info_vec_str.cend());
     }
   }
 }

View File

@@ -20,8 +20,6 @@
 #include <memory>
 #include <string>
 #include <vector>
-#include <utility>
-#include <stack>
 #include "utils/info.h"
 #include "ir/anf.h"

View File

@@ -19,7 +19,6 @@
 #include <string>
 #include <memory>
-#include <utility>
 namespace mindspore {
 class TraceInfo;
@@ -33,6 +32,7 @@ class TraceInfo {
  public:
   explicit TraceInfo(const DebugInfoPtr &info) : debug_info_(info) {}
   TraceInfo(const TraceInfo &other) = default;
+  TraceInfo &operator=(const TraceInfo &) = default;
   virtual ~TraceInfo() = default;
   virtual std::string name() const { return ""; }
   virtual std::string symbol() const { return ""; }
@@ -66,6 +66,7 @@ class TracePhi : public TraceInfo {
 class TraceIfStmtTrueBranch : public TraceInfo {
  public:
   TraceIfStmtTrueBranch(const TraceIfStmtTrueBranch &) = default;
+  TraceIfStmtTrueBranch &operator=(const TraceIfStmtTrueBranch &) = default;
   explicit TraceIfStmtTrueBranch(const DebugInfoPtr &info) : TraceInfo(info) {}
   ~TraceIfStmtTrueBranch() override = default;
   MS_DECLARE_TRACE_NAME_SYMBOL("if_true", "");
@@ -75,6 +76,7 @@ class TraceIfStmtFalseBranch : public TraceInfo {
 class TraceIfStmtFalseBranch : public TraceInfo {
  public:
   TraceIfStmtFalseBranch(const TraceIfStmtFalseBranch &) = default;
+  TraceIfStmtFalseBranch &operator=(const TraceIfStmtFalseBranch &) = default;
   explicit TraceIfStmtFalseBranch(const DebugInfoPtr &info) : TraceInfo(info) {}
   ~TraceIfStmtFalseBranch() override = default;
   MS_DECLARE_TRACE_NAME_SYMBOL("if_false", "");