Fix some typos in the MindSpore IR module

Signed-off-by: leonwanghui <leon.wanghui@huawei.com>
leonwanghui 2020-04-06 16:14:59 +08:00
parent 7a367af9c6
commit 322ffef3e4
19 changed files with 129 additions and 130 deletions

@ -122,7 +122,7 @@ bool CastKernel(const TypeIdArgs &args, void *dst, const size_t data_size, const
TransDataSrc2Dst<uint16_t, int32_t>(args, dst, data_size);
break;
default:
MS_LOG(ERROR) << "unsupported datatype trans";
MS_LOG(ERROR) << "Unsupported datatype trans";
return false;
}
return true;
@ -132,7 +132,7 @@ size_t CubeSizeByType(const TypeId data_type) {
const size_t default_error = 0;
auto dt_size = TypeIdSize(data_type);
if (dt_size < 1) {
MS_LOG(ERROR) << "illegal dtype.";
MS_LOG(ERROR) << "Illegal dtype.";
return default_error;
} else if (dt_size == 1) {
return kCubeSize * 2;
@ -146,12 +146,12 @@ size_t ShapeSize(const std::vector<size_t> &shape) {
}
size_t TypeIdSize(const TypeId data_type) {
const size_t unsupport_type_error = 0;
const size_t unsupported_type_error = 0;
auto iter = type_map.find(data_type);
if (iter != type_map.end()) {
return iter->second;
}
return unsupport_type_error;
return unsupported_type_error;
}
std::vector<size_t> TransShapeTo4d(const std::vector<size_t> &shape) {
@ -174,7 +174,7 @@ std::vector<size_t> TransShapeTo4d(const std::vector<size_t> &shape) {
}
break;
default:
MS_LOG(EXCEPTION) << "Unexpeted shape size = " << shape.size();
MS_LOG(EXCEPTION) << "Unexpected shape size = " << shape.size();
}
return shape_4d;
}
@ -183,7 +183,7 @@ std::vector<size_t> TransShapeToDevice(const std::vector<size_t> &shape, const s
std::vector<size_t> device_shape;
if (format == kOpFormat_FRAC_NZ) {
if (shape.size() < 2) {
MS_EXCEPTION(NotSupportError) << "format " << format << " is not support shape " << shape.size();
MS_EXCEPTION(NotSupportError) << "Format " << format << " is not support shape " << shape.size();
}
if (shape.size() > 2) {
(void)std::copy(shape.begin(), shape.end() - 2, std::back_inserter(device_shape));
@ -231,37 +231,37 @@ std::vector<size_t> TransShapeToDevice(const std::vector<size_t> &shape, const s
}
bool TransDataType(const TypeIdArgs &args, void *result) {
MS_LOG(DEBUG) << "begin trans datatype from " << TypeIdLabel(args.host_data_type) << " to "
MS_LOG(DEBUG) << "Begin trans datatype from " << TypeIdLabel(args.host_data_type) << " to "
<< TypeIdLabel(args.device_data_type);
MS_EXCEPTION_IF_NULL(result);
std::pair<TypeId, TypeId> type_info(args.host_data_type, args.device_data_type);
auto iter = mode_map.find(type_info);
if (iter == mode_map.end()) {
MS_LOG(ERROR) << "unsupported datatype trans. src_type :" << TypeIdLabel(args.host_data_type)
MS_LOG(ERROR) << "Unsupported datatype trans. src_type :" << TypeIdLabel(args.host_data_type)
<< ", dst_type:" << TypeIdLabel(args.device_data_type);
return false;
}
auto trans_mode = iter->second;
auto type_size = TypeIdSize(args.device_data_type);
if (type_size < 1) {
MS_LOG(ERROR) << "invalid host data type.";
MS_LOG(ERROR) << "Invalid host data type.";
return false;
}
if (args.host_shape_size < 1) {
MS_LOG(ERROR) << "invalid host data size.";
MS_LOG(ERROR) << "Invalid host data size.";
return false;
}
if (!CastKernel(args, result, args.host_shape_size, trans_mode)) {
MS_LOG(ERROR) << "failed to trans datatype..";
MS_LOG(ERROR) << "Failed to trans datatype..";
return false;
}
return true;
}
bool TransFormat(const FormatArgs &args, void *result) {
MS_LOG(DEBUG) << "start trans format.";
MS_LOG(DEBUG) << "Start trans format.";
if (TypeIdSize(args.src_data_type) < 1) {
MS_LOG(ERROR) << "invalid datatype..";
MS_LOG(ERROR) << "Invalid datatype..";
return false;
}
if ((args.host_format == kOpFormat_NCHW || args.host_format == kOpFormat_ND) &&
@ -276,9 +276,9 @@ bool TransFormat(const FormatArgs &args, void *result) {
}
bool TransFormatFromDeviceToHost(const FormatArgs &args, void *result) {
MS_LOG(DEBUG) << "start trans format.";
MS_LOG(DEBUG) << "Start trans format.";
if (TypeIdSize(args.src_data_type) < 1) {
MS_LOG(ERROR) << "invalid datatype..";
MS_LOG(ERROR) << "Invalid datatype..";
return false;
}
if ((args.host_format == kOpFormat_NCHW || args.host_format == kOpFormat_ND) &&
@ -293,15 +293,15 @@ bool TransFormatFromDeviceToHost(const FormatArgs &args, void *result) {
}
bool NchwToFracZ(const FormatArgs &args, void *result) {
MS_LOG(DEBUG) << "trans format from nchw to frac_z";
MS_LOG(DEBUG) << "Trans format from nchw to frac_z";
MS_EXCEPTION_IF_NULL(result);
if (args.host_shape.size() != kNchwDims) {
MS_LOG(ERROR) << "invalid host shape, host shape dims:" << args.host_shape.size() << ", expect dims:" << kNchwDims;
MS_LOG(ERROR) << "Invalid host shape, host shape dims:" << args.host_shape.size() << ", expect dims:" << kNchwDims;
return false;
}
size_t size = TypeIdSize(args.src_data_type);
if (size < 1) {
MS_LOG(ERROR) << "illegal dtype.";
MS_LOG(ERROR) << "Illegal dtype.";
return false;
}
auto n = args.host_shape[0];
@ -311,7 +311,7 @@ bool NchwToFracZ(const FormatArgs &args, void *result) {
size_t c0 = CubeSizeByType(args.src_data_type);
if (c0 < 1) {
MS_LOG(ERROR) << "illegal dtype.";
MS_LOG(ERROR) << "Illegal dtype.";
return false;
}
size_t c1 = Ceil(c, c0);
@ -327,7 +327,7 @@ bool NchwToFracZ(const FormatArgs &args, void *result) {
size_t dst_size = total_ele_cnt * size;
if (dst_size != args.device_size) {
MS_LOG(ERROR) << "illegal total data size."
MS_LOG(ERROR) << "Illegal total data size."
<< "dst size is :" << dst_size << "device size is :" << args.device_size;
return false;
}
@ -369,20 +369,20 @@ bool NchwToFracZ(const FormatArgs &args, void *result) {
}
bool FracZToNchw(const FormatArgs &args, void *result) {
MS_LOG(DEBUG) << "trans format from frac_z to nchw";
MS_LOG(DEBUG) << "Trans format from frac_z to nchw";
MS_EXCEPTION_IF_NULL(result);
if (args.host_shape.size() != kNchwDims) {
MS_LOG(ERROR) << "invalid host shape, host shape dims:" << args.host_shape.size() << ", expect dims:" << kNchwDims;
MS_LOG(ERROR) << "Invalid host shape, host shape dims:" << args.host_shape.size() << ", expect dims:" << kNchwDims;
return false;
}
size_t size = TypeIdSize(args.src_data_type);
if (size < 1) {
MS_LOG(ERROR) << "illegal dtype.";
MS_LOG(ERROR) << "Illegal dtype.";
return false;
}
size_t total_size = ShapeSize(args.device_shape) * size;
if (total_size != args.device_size) {
MS_LOG(ERROR) << "illegal total data size, total_size:" << total_size << ", device_size:" << args.device_size;
MS_LOG(ERROR) << "Illegal total data size, total_size:" << total_size << ", device_size:" << args.device_size;
return false;
}
@ -435,7 +435,7 @@ bool FracZToNchw(const FormatArgs &args, void *result) {
bool TransShapeToNz(const std::vector<size_t> &host_shape, std::vector<size_t> *hw_shape) {
MS_EXCEPTION_IF_NULL(hw_shape);
if (host_shape.empty()) {
MS_LOG(ERROR) << "size of vector is 0.";
MS_LOG(ERROR) << "Size of vector is 0.";
return false;
}
switch (host_shape.size()) {
@ -447,7 +447,7 @@ bool TransShapeToNz(const std::vector<size_t> &host_shape, std::vector<size_t> *
default:
auto size = host_shape.size();
if (size < 2) {
MS_LOG(ERROR) << "illegal size.";
MS_LOG(ERROR) << "Illegal size.";
return false;
}
size_t times = 1;
@ -462,26 +462,26 @@ bool TransShapeToNz(const std::vector<size_t> &host_shape, std::vector<size_t> *
}
bool NchwToFracNz(const FormatArgs &args, void *result) {
MS_LOG(DEBUG) << "trans format from nchw to frac_nz.";
MS_LOG(DEBUG) << "Trans format from nchw to frac_nz.";
MS_EXCEPTION_IF_NULL(result);
std::vector<size_t> hw_shape;
if (!TransShapeToNz(args.host_shape, &hw_shape)) {
MS_LOG(ERROR) << "trans shape failed..";
MS_LOG(ERROR) << "Trans shape failed..";
return false;
}
if (hw_shape.size() < 3 || args.device_shape.size() < 4) {
MS_LOG(ERROR) << "invalid shape size.";
MS_LOG(ERROR) << "Invalid shape size.";
return false;
}
auto size = TypeIdSize(args.src_data_type);
if (size < 1) {
MS_LOG(ERROR) << "illegal dtype";
MS_LOG(ERROR) << "Illegal dtype";
return false;
}
auto dst_size = ShapeSize(args.device_shape) * size;
if (dst_size != args.device_size) {
MS_LOG(ERROR) << "illegal total data size, total_size:" << dst_size << ", device_size:" << args.device_size;
MS_LOG(ERROR) << "Illegal total data size, total_size:" << dst_size << ", device_size:" << args.device_size;
return false;
}
auto times = hw_shape.at(0);
@ -538,26 +538,26 @@ bool NchwToFracNz(const FormatArgs &args, void *result) {
}
bool FracNzToNchw(const FormatArgs &args, void *result) {
MS_LOG(DEBUG) << "trans format from frac_nz to nchw";
MS_LOG(DEBUG) << "Trans format from frac_nz to nchw";
MS_EXCEPTION_IF_NULL(result);
std::vector<size_t> hw_shape;
if (!TransShapeToNz(args.host_shape, &hw_shape)) {
MS_LOG(ERROR) << "trans shape failed..";
MS_LOG(ERROR) << "Trans shape failed..";
return false;
}
if (hw_shape.size() < 3 || args.device_shape.size() < 4) {
MS_LOG(ERROR) << "invalid shape size.";
MS_LOG(ERROR) << "Invalid shape size.";
return false;
}
auto size = TypeIdSize(args.src_data_type);
if (size < 1) {
MS_LOG(ERROR) << "illegal dtype";
MS_LOG(ERROR) << "Illegal dtype";
return false;
}
auto dst_size = ShapeSize(args.device_shape) * size;
if (dst_size != args.device_size) {
MS_LOG(ERROR) << "illegal total data size, total_size:" << dst_size << ", device_size:" << args.device_size;
MS_LOG(ERROR) << "Illegal total data size, total_size:" << dst_size << ", device_size:" << args.device_size;
return false;
}
auto times = hw_shape.at(0);
@ -614,20 +614,20 @@ bool FracNzToNchw(const FormatArgs &args, void *result) {
}
bool NchwToNc1hwc0(const FormatArgs &args, void *result) {
MS_LOG(DEBUG) << "trans format from nchw to Nc1h1wc0";
MS_LOG(DEBUG) << "Trans format from nchw to Nc1h1wc0";
MS_EXCEPTION_IF_NULL(result);
if (args.host_shape.size() != kNchwDims) {
MS_LOG(ERROR) << "invalid host shape, host shape dims:" << args.host_shape.size() << ", expect dims:" << kNchwDims;
MS_LOG(ERROR) << "Invalid host shape, host shape dims:" << args.host_shape.size() << ", expect dims:" << kNchwDims;
return false;
}
size_t size = TypeIdSize(args.src_data_type);
if (size < 1) {
MS_LOG(ERROR) << "illegal dtype.";
MS_LOG(ERROR) << "Illegal dtype.";
return false;
}
auto total_size = ShapeSize(args.device_shape) * size;
if (total_size != args.device_size) {
MS_LOG(ERROR) << "illegal total data size, total_size:" << total_size << ", device_size:" << args.device_size;
MS_LOG(ERROR) << "Illegal total data size, total_size:" << total_size << ", device_size:" << args.device_size;
return false;
}
@ -637,7 +637,7 @@ bool NchwToNc1hwc0(const FormatArgs &args, void *result) {
auto w = args.host_shape[3];
size_t c0 = CubeSizeByType(args.src_data_type);
if (c0 < 1) {
MS_LOG(ERROR) << "illegal dtype.";
MS_LOG(ERROR) << "Illegal dtype.";
return false;
}
size_t c1 = Ceil(c, c0);
@ -687,20 +687,20 @@ bool NchwToNc1hwc0(const FormatArgs &args, void *result) {
}
bool Nc1hwc0ToNchw(const FormatArgs &args, void *result) {
MS_LOG(DEBUG) << "trans format from nc1h1wc0 to nchw";
MS_LOG(DEBUG) << "Trans format from nc1h1wc0 to nchw";
MS_EXCEPTION_IF_NULL(result);
if (args.host_shape.size() != kNchwDims) {
MS_LOG(ERROR) << "invalid host shape, host shape dims:" << args.host_shape.size() << ", expect dims:" << kNchwDims;
MS_LOG(ERROR) << "Invalid host shape, host shape dims:" << args.host_shape.size() << ", expect dims:" << kNchwDims;
return false;
}
size_t size = TypeIdSize(args.src_data_type);
if (size < 1) {
MS_LOG(ERROR) << "illegal dtype.";
MS_LOG(ERROR) << "Illegal dtype.";
return false;
}
size_t total_size = ShapeSize(args.device_shape) * size;
if (total_size != args.device_size) {
MS_LOG(ERROR) << "illegal total data size, total_size:" << total_size << ", device_size:" << args.device_size;
MS_LOG(ERROR) << "Illegal total data size, total_size:" << total_size << ", device_size:" << args.device_size;
return false;
}
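
For reference, the shape arithmetic these hunks guard is small enough to check by hand: NchwToFracZ tiles the channel dimension c into c1 blocks of c0 elements, where c0 comes from CubeSizeByType, and the size checks compare the product of the device shape with the allocated buffer. A minimal standalone sketch, assuming Ceil is integer ceiling division and a FracZ device shape of {c1*h*w, n1, n0, c0} with an assumed cube edge of 16 (the helper below is illustrative, not the module's API):

#include <cstddef>
#include <vector>

// Assumed helper: integer ceiling division, e.g. Ceil(17, 16) == 2.
inline size_t Ceil(size_t m, size_t n) { return n == 0 ? 0 : (m + n - 1) / n; }

// Illustrative sketch of the FracZ shape arithmetic used above: tile
// channels c into c1 blocks of c0, and batch n into n1 blocks of n0.
std::vector<size_t> FracZDeviceShape(const std::vector<size_t> &nchw, size_t c0) {
  const size_t n = nchw[0], c = nchw[1], h = nchw[2], w = nchw[3];
  const size_t c1 = Ceil(c, c0);  // channel blocks, as in NchwToFracZ
  const size_t n0 = 16;           // assumed cube edge for the batch axis
  const size_t n1 = Ceil(n, n0);  // batch blocks
  return {c1 * h * w, n1, n0, c0};
}

The "Illegal total data size" branches above then reduce to checking that ShapeSize(device_shape) * TypeIdSize(type) equals args.device_size.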

@ -141,7 +141,7 @@ void DumpKernelInfo(const CNodePtr &node, const std::shared_ptr<SubGraphIRInfo>
void DumpParams(const FuncGraphPtr &graph, std::ostringstream &buffer, OrderedMap<AnfNodePtr, int32_t> *para_map) {
if (graph == nullptr) {
MS_LOG(INFO) << "param graph is nullptr.";
MS_LOG(INFO) << "Param graph is nullptr.";
return;
}
std::vector<AnfNodePtr> parameters = graph->parameters();
@ -175,17 +175,17 @@ void DumpParams(const FuncGraphPtr &graph, std::ostringstream &buffer, OrderedMa
if (para_map != nullptr) {
(*para_map)[p] = para++;
}
MS_LOG(DEBUG) << "record param: " << p->ToString() << " graph belong : " << p->func_graph()->ToString();
MS_LOG(DEBUG) << "Record param: " << p->ToString() << " graph belong : " << p->func_graph()->ToString();
}
}
void DumpOperator(const AnfNodePtr &op, const std::shared_ptr<SubGraphIRInfo> &gsub) {
if (op == nullptr) {
MS_LOG(INFO) << "param op is nullptr";
MS_LOG(INFO) << "Param op is nullptr";
return;
}
if (gsub == nullptr) {
MS_LOG(INFO) << "param gsub is nullptr";
MS_LOG(INFO) << "Param gsub is nullptr";
return;
}
@ -338,7 +338,7 @@ void DumpCNode(const CNodePtr &nd, const FuncGraphPtr &sub_graph, OrderedMap<Anf
}
if (nd->inputs().empty()) {
MS_LOG(EXCEPTION) << "input of apply node is empty";
MS_LOG(EXCEPTION) << "Input of apply node is empty";
}
// print operator
@ -376,7 +376,7 @@ void DumpIRInSubgraph(const std::vector<AnfNodePtr> &nodes, OrderedMap<AnfNodePt
MS_EXCEPTION_IF_NULL(nd);
FuncGraphPtr sub_graph = nd->func_graph();
if (sub_graph == nullptr) {
MS_LOG(DEBUG) << "node[" << nd->ToString() << "] belongs to no graph!";
MS_LOG(DEBUG) << "Node[" << nd->ToString() << "] belongs to no graph!";
continue;
}
std::shared_ptr<SubGraphIRInfo> gsub = (*sub_graphs)[sub_graph];
@ -430,12 +430,12 @@ void DumpIR(const std::string &filename, const FuncGraphPtr &graph, bool dump_fu
return;
}
if (filename.size() > PATH_MAX) {
MS_LOG(ERROR) << "file path " << filename << " is too long.";
MS_LOG(ERROR) << "File path " << filename << " is too long.";
return;
}
char real_path[PATH_MAX] = {0};
if (nullptr == realpath(filename.c_str(), real_path)) {
MS_LOG(DEBUG) << "dir " << filename << " does not exit.";
MS_LOG(DEBUG) << "Dir " << filename << " does not exit.";
}
OrderedMap<AnfNodePtr, int32_t> para_map;
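
The two checks above recur throughout the module: reject a path longer than PATH_MAX, then canonicalize it with realpath and treat a null result as a missing directory. A self-contained sketch of that pattern, assuming PATH_MAX is visible via <climits> on the target platform (the function name is illustrative):

#include <climits>
#include <cstdlib>
#include <string>

// Illustrative sketch: validate length first, then canonicalize.
// Returns the canonical path, or an empty string on failure.
std::string CanonicalDumpPath(const std::string &filename) {
  if (filename.size() > PATH_MAX) {
    return "";  // the "File path ... is too long." branch above
  }
  char real_path[PATH_MAX] = {0};
  if (realpath(filename.c_str(), real_path) == nullptr) {
    return "";  // the "Dir ... does not exit." branch above
  }
  return std::string(real_path);
}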

@ -49,7 +49,7 @@ std::string GetMsIrPath(void) {
path = path_ptr;
char real_path[PATH_MAX] = {0};
if (path.size() > PATH_MAX || nullptr == realpath(path.c_str(), real_path)) {
MS_LOG(EXCEPTION) << "MS IR Path error, " << path_ptr;
MS_LOG(EXCEPTION) << "MS IR path error, " << path_ptr;
}
path = real_path;
}
@ -144,8 +144,8 @@ std::string AnfExporter::GetValueNodeText(const FuncGraphPtr& fg, const ValueNod
}
std::string AnfExporter::GetMultitypeFuncGraphText(const prim::MultitypeFuncGraphPtr& mt_func_graph) {
auto py_funs = mt_func_graph->GetPyFunctions();
if (py_funs.empty()) {
auto py_funcs = mt_func_graph->GetPyFunctions();
if (py_funcs.empty()) {
return "";
}
@ -153,7 +153,7 @@ std::string AnfExporter::GetMultitypeFuncGraphText(const prim::MultitypeFuncGrap
oss << "{";
bool is_first = true;
for (const auto& py_func : py_funs) {
for (const auto& py_func : py_funcs) {
if (is_first) {
is_first = false;
} else {
@ -626,7 +626,7 @@ void AnfExporter::ExportFuncGraph(const std::string& filename, const FuncGraphPt
ofs << "\n\n";
(void)func_graph_set.erase(fg);
}
ofs << "# num of total funcgraphs: " << exported.size();
ofs << "# num of total function graphs: " << exported.size();
ofs.close();
}
@ -651,7 +651,7 @@ void AnfExporter::ExportFuncGraph(const std::string& filename, const std::vector
ofs << "\n\n";
}
ofs << "# num of total funcgraphs: " << graphs.size();
ofs << "# num of total function graphs: " << graphs.size();
ofs.close();
}
@ -763,7 +763,7 @@ class Lexer {
fin.close();
}
} catch (const std::exception& e) {
MS_LOG(ERROR) << "exception when closing file";
MS_LOG(ERROR) << "Exception when closing file";
} catch (...) {
std::string exName(abi::__cxa_current_exception_type()->name());
MS_LOG(ERROR) << "Error occurred when closing file. Exception name: " << exName;
@ -802,7 +802,7 @@ class Lexer {
Token token = GetNextTokenInner();
const char* str = token_text[token];
std::string text = (str == nullptr ? GetTokenText() : str);
MS_LOG(DEBUG) << "------parse token] " << text;
MS_LOG(DEBUG) << "------Parse token] " << text;
return token;
}
@ -1642,7 +1642,7 @@ class IrParser {
MS_LOG(EXCEPTION) << "Expect @file at line " << lexer_.GetLineNo();
}
// load prameter default value from serialized file
// load parameter default value from serialized file
py::object default_obj = LoadObject(lexer_.GetTokenText());
param->set_default_param(default_obj);
@ -1950,7 +1950,7 @@ class IrParser {
return TOK_ERROR;
}
// restore python funciton of PrimitivePy from serialized file
// restore python function of PrimitivePy from serialized file
py::object py_obj = LoadObject(lexer_.GetTokenText());
PrimitivePyPtr ptr = nullptr;
if (py::hasattr(py_obj, "__setattr_flag__") && py::hasattr(py_obj, "_clone")) {
@ -1958,7 +1958,7 @@ class IrParser {
py::object new_obj = clone_fn();
ptr = new_obj.cast<PrimitivePyPtr>();
if (ptr == nullptr) {
MS_LOG(EXCEPTION) << "cast to type 'PrimitivePyPtr' error";
MS_LOG(EXCEPTION) << "Cast to type 'PrimitivePyPtr' error";
}
} else {
ptr = std::make_shared<PrimitivePy>(id.substr(strlen("PrimitivePy::")), py_obj);
@ -2221,15 +2221,15 @@ class IrParser {
};
std::vector<FuncGraphPtr> ImportIR(const std::string& filename) {
IrParser paser(filename.c_str());
paser.ParseFile();
return paser.GetFuncGraphs();
IrParser parser(filename.c_str());
parser.ParseFile();
return parser.GetFuncGraphs();
}
#ifdef ENABLE_DUMP_IR
void DumpIRProto(const FuncGraphPtr& func_graph, const std::string& suffix) {
if (func_graph == nullptr) {
MS_LOG(ERROR) << "func graph is nullptr";
MS_LOG(ERROR) << "Func graph is nullptr";
return;
}
auto ms_context = MsContext::GetInstance();
@ -2243,16 +2243,16 @@ void DumpIRProto(const FuncGraphPtr& func_graph, const std::string& suffix) {
}
std::string file_path = save_graphs_path + "/" + "ms_output_" + suffix + ".pb";
if (file_path.size() > PATH_MAX) {
MS_LOG(ERROR) << "file path " << file_path << " is too long.";
MS_LOG(ERROR) << "File path " << file_path << " is too long.";
return;
}
char real_path[PATH_MAX] = {0};
if (nullptr == realpath(file_path.c_str(), real_path)) {
MS_LOG(DEBUG) << "dir " << file_path << " does not exit.";
MS_LOG(DEBUG) << "Dir " << file_path << " does not exit.";
} else {
std::string path_string = real_path;
if (chmod(common::SafeCStr(path_string), S_IRUSR | S_IWUSR) == -1) {
MS_LOG(ERROR) << "modify file:" << real_path << " to rw fail.";
MS_LOG(ERROR) << "Modify file:" << real_path << " to rw fail.";
return;
}
}

@ -362,7 +362,7 @@ Digraph::~Digraph() {
fout_.close();
}
} catch (const std::exception& e) {
MS_LOG(ERROR) << "exception when closing file " << filename_;
MS_LOG(ERROR) << "Exception when closing file " << filename_;
}
}

@ -208,7 +208,7 @@ void ProtoExporter::SetValueToProto(const ValuePtr& val, irpb::ValueProto* value
TypePtr elem_type = dyn_cast<TensorType>(val)->element();
type_proto->mutable_tensor_type()->set_elem_type(GetNumberDataType(elem_type));
} else {
MS_LOG(WARNING) << "Not supported type " << val->type_name();
MS_LOG(WARNING) << "Unsupported type " << val->type_name();
}
}

@ -101,7 +101,7 @@ bool Dump::IsConfigValid(const nlohmann::json& dumpSettings) {
auto kernels = dumpSettings.at("kernels");
if (!(enable.is_boolean() && trans_flag.is_boolean() && mode.is_number() && path.is_string() &&
net_name.is_string() && iteration.is_number() && kernels.is_array())) {
MS_LOG(ERROR) << "element's type in Dump config json is invalid.";
MS_LOG(ERROR) << "Element's type in Dump config json is invalid.";
dump_enable_ = false;
return false;
}
@ -121,7 +121,7 @@ bool Dump::IsConfigValid(const nlohmann::json& dumpSettings) {
bool Dump::SetDumpConfFromJsonFile() {
const char* config_path_str = std::getenv("MINDSPORE_CONFIG_PATH");
if (config_path_str != nullptr) {
MS_LOG(INFO) << "getenv MINDSPORE_CONFIG_PATH :" << config_path_str;
MS_LOG(INFO) << "Getenv MINDSPORE_CONFIG_PATH :" << config_path_str;
} else {
MS_LOG(INFO) << "No need E2E Dump. please export MINDSPORE_CONFIG_PATH eg: MINDSPORE_CONFIG_PATH=/etc";
dump_enable_ = false;
@ -132,7 +132,7 @@ bool Dump::SetDumpConfFromJsonFile() {
auto id = context_ptr->device_id();
char real_path[PATH_MAX] = {0};
if (nullptr == realpath(config_path_str, real_path)) {
MS_LOG(ERROR) << "env e2e dump path error, " << config_path_str;
MS_LOG(ERROR) << "Env e2e dump path error, " << config_path_str;
dump_enable_ = false;
return false;
}
@ -150,20 +150,20 @@ bool Dump::SetDumpConfFromJsonFile() {
bool Dump::DumpToFile(const std::string& filename, const void* data, size_t len) {
if (filename.empty() || data == nullptr || len == 0) {
MS_LOG(ERROR) << "incorrect parameter.";
MS_LOG(ERROR) << "Incorrect parameter.";
return false;
}
std::string realpath;
bool ret = GetRealPath(filename, &realpath);
if (!ret) {
MS_LOG(ERROR) << "get real path failed.";
MS_LOG(ERROR) << "Get real path failed.";
return false;
}
std::ofstream fd;
fd.open(realpath, std::ios::binary | std::ios::out);
if (!fd.is_open()) {
MS_LOG(ERROR) << "open file " << realpath << " fail.";
MS_LOG(ERROR) << "Open file " << realpath << " fail.";
return false;
}
(void)fd.write(reinterpret_cast<const char*>(data), SizeToLong(len));
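
For context, DumpToFile above is the standard validate/open/write sequence for a binary ofstream; a minimal sketch under the same checks (names hypothetical):

#include <cstddef>
#include <fstream>
#include <string>

// Illustrative sketch of the flow above: validate, open binary, write.
bool WriteBinaryFile(const std::string &path, const void *data, size_t len) {
  if (path.empty() || data == nullptr || len == 0) {
    return false;  // "Incorrect parameter."
  }
  std::ofstream fd(path, std::ios::binary | std::ios::out);
  if (!fd.is_open()) {
    return false;  // "Open file ... fail."
  }
  fd.write(reinterpret_cast<const char *>(data), static_cast<std::streamsize>(len));
  return fd.good();
}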
@ -182,7 +182,7 @@ bool Dump::GetRealPath(const std::string& inpath, std::string* outpath) {
if (path_split_pos != std::string::npos) {
std::string prefix_path = inpath.substr(0, path_split_pos);
if (prefix_path.length() >= PATH_MAX) {
MS_LOG(ERROR) << "prefix path is too longer!";
MS_LOG(ERROR) << "Prefix path is too longer!";
return false;
}
std::string last_path = inpath.substr(path_split_pos, inpath.length() - path_split_pos);
@ -201,11 +201,11 @@ bool Dump::GetRealPath(const std::string& inpath, std::string* outpath) {
if (path_split_pos == std::string::npos) {
if (inpath.length() >= PATH_MAX) {
MS_LOG(ERROR) << "prefix path is too longer!";
MS_LOG(ERROR) << "Prefix path is too longer!";
return false;
}
if (nullptr == realpath(inpath.c_str(), real_path)) {
MS_LOG(ERROR) << "file " << inpath << " does not exit, it will be created.";
MS_LOG(ERROR) << "File " << inpath << " does not exit, it will be created.";
}
*outpath = std::string(real_path);
}
@ -218,7 +218,7 @@ bool Dump::CreateNotExistDirs(const std::string& path) {
MS_EXCEPTION_IF_NULL(fs);
char temp_path[PATH_MAX] = {0};
if (path.length() > PATH_MAX) {
MS_LOG(ERROR) << "path lens is max than " << PATH_MAX;
MS_LOG(ERROR) << "Path lens is max than " << PATH_MAX;
return false;
}
for (uint32_t i = 0; i < path.length(); i++) {
@ -229,7 +229,7 @@ bool Dump::CreateNotExistDirs(const std::string& path) {
temp_path[i] = '\0';
std::string path_handle(temp_path);
if (!fs->FileExist(temp_path)) {
MS_LOG(INFO) << "dir " << path_handle << " does not exit, creating...";
MS_LOG(INFO) << "Dir " << path_handle << " does not exit, creating...";
if (!fs->CreateDir(temp_path)) {
MS_LOG(ERROR) << "Create " << path_handle << " dir error";
return false;
@ -241,7 +241,7 @@ bool Dump::CreateNotExistDirs(const std::string& path) {
}
if (!fs->FileExist(path)) {
MS_LOG(INFO) << "dir " << path << " does not exit, creating...";
MS_LOG(INFO) << "Dir " << path << " does not exit, creating...";
if (!fs->CreateDir(path)) {
MS_LOG(ERROR) << "Create " << path << " dir error";
return false;
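
CreateNotExistDirs above scans the path and creates each missing prefix directory at every '/' before creating the full path itself. A compact sketch of the same walk, with the filesystem reduced to the two calls the hunk uses (the interface and function here are hypothetical):

#include <cstddef>
#include <string>

// Assumed interface, mirroring the fs object used above.
struct FileSystem {
  virtual bool FileExist(const std::string &path) = 0;
  virtual bool CreateDir(const std::string &path) = 0;
  virtual ~FileSystem() = default;
};

// Illustrative sketch: create every missing prefix directory, then the path.
bool EnsureDirs(const std::string &path, FileSystem *fs) {
  for (size_t i = 1; i < path.length(); ++i) {
    if (path[i] != '/') continue;
    const std::string prefix = path.substr(0, i);
    if (!fs->FileExist(prefix) && !fs->CreateDir(prefix)) {
      return false;  // "Create ... dir error"
    }
  }
  return fs->FileExist(path) || fs->CreateDir(path);
}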

@ -193,7 +193,7 @@ void TraceManager::DebugTrace(const TraceInfoPtr& trace_info) {
}
TraceContextPtr context = std::make_shared<TraceContext>(trace_info);
if (trace_info->debug_info() == nullptr) {
MS_LOG(EXCEPTION) << "trace debug info is null";
MS_LOG(EXCEPTION) << "Trace debug info is null";
}
TraceManager::trace_context_stack_.push(context);
}
@ -205,7 +205,7 @@ void TraceManager::DebugTrace(const DebugInfoPtr& debug_info, const TraceInfoPtr
auto cloned_info = trace_info->clone();
cloned_info->set_debug_info(debug_info);
if (cloned_info->debug_info() == nullptr) {
MS_LOG(EXCEPTION) << "trace debug info is null with cloned trace";
MS_LOG(EXCEPTION) << "Trace debug info is null with cloned trace";
}
TraceContextPtr context = std::make_shared<TraceContext>(cloned_info);
TraceManager::trace_context_stack_.push(context);

@ -89,7 +89,7 @@ std::string GetDebugInfo(const DebugInfoPtr& info, SourceLineTip tip) {
return "";
}
// a trace info identifys a node transform, so we can trace the node transform through
// a trace info identifies a node transform, so we can trace the node transform through
// a link of trace info and debug info
std::string GetInfoWithAction(const std::vector<DebugInfoPtr>& info_vec, SourceLineTip tip) {
if (info_vec.size() < 1) {
@ -173,7 +173,7 @@ void DumpInferStack(std::ostringstream& oss) {
}
auto graph_context = graph_infer->graph_context();
if (graph_context == nullptr) {
MS_LOG(INFO) << "null context continue";
MS_LOG(INFO) << "Null context continue";
continue;
}
auto graph = graph_context->func_graph();
@ -264,7 +264,7 @@ void AnalyzedFuncGraphExporter::ExportFuncGraph(const std::string& filename,
param_index = 1;
auto tagged_func_graphs = CalcTaggedFuncGraphs();
// first output grapn on the analysis stack
// first output graph on the analysis stack
for (const auto& node_cfg : node_cfgs) {
auto fg = node_cfg->context()->func_graph();
// the graph is already output, skip it
@ -291,7 +291,7 @@ void AnalyzedFuncGraphExporter::ExportFuncGraph(const std::string& filename,
ofs << "\n\n";
(void)func_graph_set.erase(fg);
}
ofs << "# num of total funcgraphs: " << exported.size();
ofs << "# num of total function graphs: " << exported.size();
ofs.close();
}
@ -332,7 +332,7 @@ void GetInferStackInfo(std::ostringstream& oss) {
MS_LOG(INFO) << "Get graph analysis information *end*";
}
// trace the graph evaluator statck
// trace the graph evaluator stack
static std::stack<std::pair<abstract::EvaluatorPtr, abstract::AnfNodeConfigPtr>> graph_infer_stack;
// trace the cnode infer debug info
static std::vector<abstract::AnfNodeConfigPtr> cnode_debug_stack{};

@ -36,6 +36,6 @@ std::string TraceInfo::GetActionBetweenNode(const DebugInfoPtr& info) {
} else if (debug_info()->trace_info() != nullptr) {
return act_name + debug_info()->trace_info()->GetActionBetweenNode(info);
}
return "not in the traced info";
return "Not in the traced info";
}
} // namespace mindspore

@ -83,7 +83,7 @@ class AnfVisitor;
// Methods:
// func_graph: return FuncGraph that this AnfNode belongs to.
// scope: return the scope namespace of this AnfNode. Set it using set_scope.
// abstract: return the cached inferred abstract value. It cantains type, shape
// abstract: return the cached inferred abstract value. It contains type, shape
// value. Set New cache using set_abstract.
// intermediate_abstract: return the cached inferring abstract value.
// Type/Shape: return the related info of this AnfNode. When this AnfNode is an
@ -284,7 +284,7 @@ class Parameter : public ANode {
};
using ParameterPtr = std::shared_ptr<Parameter>;
// Value is used to represent the atomic expression metioned in BNF.
// Value is used to represent the atomic expression mentioned in BNF.
// It mainly be stored in ValueNode. Value and ValueNode is related definition.
class Value : public Base {
public:
@ -313,7 +313,7 @@ using ValuePtr = std::shared_ptr<Value>;
using ValuePtrList = std::vector<ValuePtr>;
// ValueNode is used to hold value. Unlike CNode and Parameter, ValueNode
// do not belong to any particular function graph.
// does not belong to any particular function graph.
class ValueNode : public ANode {
public:
explicit ValueNode(const ValuePtr &value) : value_(value) {}

@ -34,19 +34,19 @@ bool Number::operator==(const Type& other) const {
Int::Int(const int nbits) : Number(IntBitsToTypeId(nbits), nbits, false) {
if (nbits != 8 && nbits != 16 && nbits != 32 && nbits != 64) {
MS_LOG(EXCEPTION) << "wrong number of bits.";
MS_LOG(EXCEPTION) << "Wrong number of bits.";
}
}
UInt::UInt(const int nbits) : Number(UIntBitsToTypeId(nbits), nbits, false) {
if (nbits != 8 && nbits != 16 && nbits != 32 && nbits != 64) {
MS_LOG(EXCEPTION) << "wrong number of bits.";
MS_LOG(EXCEPTION) << "Wrong number of bits.";
}
}
Float::Float(const int nbits) : Number(FloatBitsToTypeId(nbits), nbits, false) {
if (nbits != 16 && nbits != 32 && nbits != 64) {
MS_LOG(EXCEPTION) << "wrong number of bits.";
MS_LOG(EXCEPTION) << "Wrong number of bits.";
}
}

@ -37,7 +37,7 @@ TypeId IntBitsToTypeId(const int nbits) {
case 64:
return kNumberTypeInt64;
default:
MS_LOG(EXCEPTION) << "wrong number of bits.";
MS_LOG(EXCEPTION) << "Wrong number of bits.";
}
}
@ -52,7 +52,7 @@ TypeId UIntBitsToTypeId(const int nbits) {
case 64:
return kNumberTypeUInt64;
default:
MS_LOG(EXCEPTION) << "wrong number of bits.";
MS_LOG(EXCEPTION) << "Wrong number of bits.";
}
}
@ -65,7 +65,7 @@ TypeId FloatBitsToTypeId(const int nbits) {
case 64:
return kNumberTypeFloat64;
default:
MS_LOG(EXCEPTION) << "wrong number of bits.";
MS_LOG(EXCEPTION) << "Wrong number of bits.";
}
}

@ -174,7 +174,7 @@ class FuncGraph : public FuncGraphBase {
GraphDebugInfoPtr debug_info();
void set_debug_info(const GraphDebugInfoPtr &info) {
if (info == nullptr) {
MS_LOG(EXCEPTION) << "graph set null debug info";
MS_LOG(EXCEPTION) << "Graph set null debug info";
}
this->debug_info_ = info;
}

@ -817,7 +817,7 @@ void FuncGraphChildDirect::OnMoveAllCNode(FuncGraphPtr src, FuncGraphPtr dst) {
void FuncGraphParentsDirectCollector::OnModEdge(AnfNodePtr node, int, AnfNodePtr inp, EdgeProcessDirection direction) {
MS_EXCEPTION_IF_NULL(node);
FuncGraphPtr fg1 = node->func_graph();
// possible chirld parent
// possible child parent
if (IsValueNode<FuncGraph>(inp)) {
FuncGraphPtr fg2 = GetValueNode<FuncGraphPtr>(inp);
if (Mod(fg1, ParentProxy(fg2), direction)) {
@ -1181,7 +1181,7 @@ bool FuncGraphJTotalComputer::SeekJ(const FuncGraphPtr& fg, const FuncGraphSetPt
}
path->add(fg);
// checkg if func graphs used contains J(func_graph);
// check if func graphs used contains J(func_graph);
auto& used = this->manager_->func_graphs_used();
for (auto& item : used[fg]) {
auto used_g = item.first;

@ -650,7 +650,7 @@ class FuncGraphTransaction {
explicit FuncGraphTransaction(FuncGraphManager* manager) : manager_(manager), changes_() {
MS_EXCEPTION_IF_NULL(manager_);
if (!manager_->IsManaged()) {
MS_LOG(DEBUG) << "the manager is not managed yet";
MS_LOG(DEBUG) << "The manager is not managed yet";
}
}

@ -148,7 +148,7 @@ class MetaTensor : public Value {
//
// The constructed MetaTensor object has the same type and shape with meta_tensor.
//
// param meta_tensor An exisiting MetaTensor object.
// param meta_tensor An existing MetaTensor object.
virtual MetaTensor& operator=(const MetaTensor& meta_tensor);
// brief Compares two MetaTensor objects.
@ -166,7 +166,7 @@ class MetaTensor : public Value {
TypeId data_type() const { return data_type_; }
std::string ToString() const override;
std::string DumpText() const override;
// bried Sets the data type of a tensor in its MetaTensor.
// brief Sets the data type of a tensor in its MetaTensor.
//
// param data_type The data type of the tensor to be set.
virtual TypeId set_data_type(const TypeId data_type) {
@ -314,7 +314,7 @@ class Tensor : public MetaTensor {
//
// The constructed Tensor object has the same type and shape with tensor.
//
// param tensor An exisiting Tensor object.
// param tensor An existing Tensor object.
Tensor& operator=(const Tensor& tensor);
// brief Compares two Tensor objects.
@ -383,7 +383,7 @@ class Tensor : public MetaTensor {
// return The [TypeId] of the tensor data.
TypeId GetDataType(const py::buffer_info& buf) const;
// bried Sets the data type of a tensor.
// brief Sets the data type of a tensor.
//
// param data_type The data type of the tensor to be set.
//

@ -43,14 +43,13 @@ VisitFuncType AnfVisitor::Match(const PrimitivePtr &prim, const std::vector<opt:
}
auto &inputs = node->cast<CNodePtr>()->inputs();
// infact, funcs_size == inps_size - 1
auto funcs_size = funcs.size();
auto inps_size = inputs.size();
auto inputs_size = inputs.size();
// check the inputs are matched with the predicate functions
if (funcs_size > 0) {
// use the predicate function list to check the number of inputs
if (funcs_size != (inps_size - 1)) {
if (funcs_size != (inputs_size - 1)) {
return;
}
@ -63,7 +62,7 @@ VisitFuncType AnfVisitor::Match(const PrimitivePtr &prim, const std::vector<opt:
}
// visit the inputs
for (size_t i = 1; i < inps_size; i++) {
for (size_t i = 1; i < inputs_size; i++) {
this->Visit(inputs[i]);
}
};
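
The off-by-one in this hunk is deliberate: a CNode stores the primitive itself at inputs[0], so only inputs[1..] are operands and each operand pairs with one predicate function, hence funcs_size == inputs_size - 1. A generic sketch of that check (the template and names are hypothetical):

#include <cstddef>
#include <vector>

// Illustrative sketch: inputs[0] is the primitive, operands start at 1,
// so a full match needs exactly inputs.size() - 1 predicates.
template <typename Node, typename Pred>
bool OperandsMatch(const std::vector<Node> &inputs, const std::vector<Pred> &funcs) {
  if (inputs.empty() || funcs.size() != inputs.size() - 1) {
    return false;
  }
  for (size_t i = 1; i < inputs.size(); ++i) {
    if (!funcs[i - 1](inputs[i])) {
      return false;
    }
  }
  return true;
}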

@ -36,7 +36,7 @@
#endif
const char SINGLE_OP_GRAPH[] = "single_op_graph";
// primitive unable to infer value for constant input in pynative mode
// primitive unable to infer value for constant input in PyNative mode
const std::unordered_set<std::string> vm_operators = {"partial", "depend"};
namespace mindspore {
@ -45,7 +45,7 @@ inline ValuePtr PyAttrValue(const py::object& obj) {
ValuePtr converted_ret = nullptr;
bool converted = parse::ConvertData(obj, &converted_ret);
if (!converted) {
MS_LOG(EXCEPTION) << "attribute convert error with type:" << std::string(py::str(obj));
MS_LOG(EXCEPTION) << "Attribute convert error with type:" << std::string(py::str(obj));
}
return converted_ret;
}
@ -67,7 +67,7 @@ void PynativeInfer(const PrimitivePyPtr& prim, const py::tuple& py_args, OpExecI
OpExecInfoPtr GenerateOpExecInfo(const py::args& args) {
if (args.size() != PY_ARGS_NUM) {
MS_LOG(ERROR) << "four args are needed by RunOp";
MS_LOG(ERROR) << "Four args are needed by RunOp";
return nullptr;
}
auto op_exec_info = std::make_shared<OpExecInfo>();
@ -145,13 +145,13 @@ py::object RunOpInVM(const OpExecInfoPtr& op_exec_info, PynativeStatusCode* stat
py::object RunOpInMs(const OpExecInfoPtr& op_exec_info, PynativeStatusCode* status) {
MS_EXCEPTION_IF_NULL(op_exec_info);
MS_LOG(INFO) << "start run op[" << op_exec_info->op_name << "] with backend policy ms";
MS_LOG(INFO) << "Start run op[" << op_exec_info->op_name << "] with backend policy ms";
auto ms_context = MsContext::GetInstance();
MS_EXCEPTION_IF_NULL(ms_context);
ms_context->set_enable_pynative_infer(true);
std::string device_target = ms_context->device_target();
if (device_target != kAscendDevice && device_target != kGPUDevice) {
MS_EXCEPTION(ArgumentError) << "device target [" << device_target << "] is not supported in Pynative mode";
MS_EXCEPTION(ArgumentError) << "Device target [" << device_target << "] is not supported in Pynative mode";
}
std::shared_ptr<session::SessionBasic> session = session::SessionFactory::Get().Create(device_target);
MS_EXCEPTION_IF_NULL(session);
@ -197,7 +197,7 @@ py::object RunOpWithBackendPolicy(MsBackendPolicy backend_policy, const OpExecIn
break;
}
default:
MS_LOG(ERROR) << "No backend configed for run op";
MS_LOG(ERROR) << "No backend configured for run op";
}
return result;
}
@ -240,7 +240,7 @@ py::tuple RunOp(const py::args& args) {
}
result = RunOpWithBackendPolicy(backend_policy, op_exec_info, &status);
if (status != PYNATIVE_SUCCESS) {
MS_LOG(ERROR) << "Fail to run " << op_exec_info->op_name;
MS_LOG(ERROR) << "Failed to run " << op_exec_info->op_name;
return err_ret;
}

@ -47,7 +47,7 @@ inline ValuePtr PyAttrValue(const py::object& obj) {
ValuePtr converted_ret = nullptr;
bool converted = parse::ConvertData(obj, &converted_ret);
if (!converted) {
MS_LOG(EXCEPTION) << "attribute convert error with type:" << std::string(py::str(obj));
MS_LOG(EXCEPTION) << "Attribute convert error with type:" << std::string(py::str(obj));
}
return converted_ret;
}
@ -67,7 +67,7 @@ MeTensorPtr ConvertPyObjToTensor(const py::object& obj) {
} else if (py::isinstance<py::array>(obj)) {
me_tensor_ptr = std::make_shared<MeTensor>(py::cast<py::array>(obj), nullptr);
} else {
MS_LOG(EXCEPTION) << "run op inputs type is invalid!";
MS_LOG(EXCEPTION) << "Run op inputs type is invalid!";
}
return me_tensor_ptr;
}
@ -97,7 +97,7 @@ bool SetInputsForSingleOpGraph(const OpExecInfoPtr& op_exec_info, const std::vec
auto const_op_desc =
transform::TransformUtil::GetGeTensorDesc(me_tensor_ptr->shape_c(), me_tensor_ptr->data_type(), kOpFormat_NCHW);
if (const_op_desc == nullptr) {
MS_LOG(ERROR) << "Create variable " << op_name << " ouptut descriptor failed!";
MS_LOG(ERROR) << "Create variable " << op_name << " output descriptor failed!";
return false;
}
auto pointer_cast_const_op = std::static_pointer_cast<transform::Constant>(const_op);
@ -108,7 +108,7 @@ bool SetInputsForSingleOpGraph(const OpExecInfoPtr& op_exec_info, const std::vec
continue;
}
if (adapter->setInput(op, op_input_idx++, const_op)) {
MS_LOG(ERROR) << "fail to set params, index is " << op_input_idx;
MS_LOG(ERROR) << "Failed to set params, index is " << op_input_idx;
return false;
}
graph_input_nodes->push_back(*const_op);
@ -178,7 +178,7 @@ void ToTensorPtr(const OpExecInfoPtr op_exec_info, std::vector<GeTensorPtr>* con
MeTensorPtr me_tensor_ptr = ConvertPyObjToTensor(op_inputs[i]);
auto ge_tensor_ptr = transform::TransformUtil::ConvertTensor(me_tensor_ptr, kOpFormat_NCHW);
if (ge_tensor_ptr == nullptr) {
MS_LOG(EXCEPTION) << "convert inputs to GE tensor failed in op " << op_exec_info->op_name << ".";
MS_LOG(EXCEPTION) << "Convert inputs to GE tensor failed in op " << op_exec_info->op_name << ".";
}
// set inputs for operator to build single node graph
inputs->push_back(ge_tensor_ptr);
@ -192,7 +192,7 @@ PynativeStatusCode ConvertAttributes(const OpExecInfoPtr& op_exec_info, const st
for (auto& item : op_attrs) {
if (!py::isinstance<py::str>(item.first)) {
MS_LOG(ERROR) << "type error in py dict convert";
MS_LOG(ERROR) << "Type error in py dict convert";
return PYNATIVE_OP_ATTRS_ERR;
}
std::string name = py::cast<std::string>(item.first);
@ -203,7 +203,7 @@ PynativeStatusCode ConvertAttributes(const OpExecInfoPtr& op_exec_info, const st
// build graph
GeGraphPtr graph = std::make_shared<GeGraph>(op_exec_info->op_name);
if (BuildSingleOpGraph(op_exec_info, inputs, attrs, graph) == false) {
MS_LOG(ERROR) << "Fail to BuildSingleOpGraph";
MS_LOG(ERROR) << "Failed to BuildSingleOpGraph";
return PYNATIVE_GRAPH_GE_BUILD_ERR;
}
@ -211,7 +211,7 @@ PynativeStatusCode ConvertAttributes(const OpExecInfoPtr& op_exec_info, const st
transform::Status ret =
transform::DfGraphManager::GetInstance().AddGraph(SINGLE_OP_GRAPH, std::shared_ptr<transform::DfGraph>(graph));
if (ret != transform::SUCCESS) {
MS_LOG(ERROR) << "Fail to AddGraph into graph manager";
MS_LOG(ERROR) << "Failed to AddGraph into graph manager";
return PYNATIVE_GRAPH_MANAGER_ERR;
}
@ -289,7 +289,7 @@ py::object RunOpInGE(const OpExecInfoPtr& op_exec_info, PynativeStatusCode* stat
run_ret = graph_runner->RunGraph(run_options, ge_inputs, &ge_outputs);
}
if (run_ret != transform::Status::SUCCESS) {
MS_LOG(ERROR) << "GraphRunner Fails to Run Graph";
MS_LOG(ERROR) << "GraphRunner fails to run graph";
*status = PYNATIVE_GRAPH_GE_RUN_ERR;
return std::move(err_ret);
}