Merge pull request !21114 from hwjiaorui/clean-code-master
i-robot 2021-07-31 06:17:58 +00:00 committed by Gitee
commit 9c692ecdc0
13 changed files with 27 additions and 31 deletions

View File

@@ -585,7 +585,6 @@ std::vector<std::pair<AnfNodePtr, std::pair<size_t, size_t>>> GetInputIndex(cons
auto const &input = input_list[i];
MS_EXCEPTION_IF_NULL(input);
bool found = false;
- // using NodeUsersMap = std::unordered_map<AnfNodePtr, std::set<std::pair<AnfNodePtr, int>>>;
auto mng = input->func_graph()->manager();
MS_EXCEPTION_IF_NULL(mng);
const NodeUsersMap &users = mng->node_users();
@@ -617,7 +616,7 @@ std::vector<std::pair<AnfNodePtr, std::pair<size_t, size_t>>> GetInputIndex(cons
int accum_idx = 0;
size_t dyn_i = 0;
for (; dyn_i < dyn_input_sizes.size(); ++dyn_i) {
- accum_idx += dyn_input_sizes[dyn_i];
+ accum_idx += LongToInt(dyn_input_sizes[dyn_i]);
if (used_as_idx < accum_idx) {
input_index.push_back(std::make_pair(
anf_node, std::make_pair(dyn_i, IntToSize(used_as_idx - (accum_idx - dyn_input_sizes[dyn_i])))));
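The loop above maps a flat input index onto the dynamic-input layout: dyn_input_sizes records how many concrete inputs each dynamic slot consumes, and the running sum locates the slot plus the offset inside it. A standalone sketch of the same mapping (illustrative only, not the project's code):

```cpp
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

// With dyn_input_sizes = {2, 3}, flat index 3 lands in slot 1 at offset 1.
std::pair<size_t, size_t> MapFlatIndex(const std::vector<int64_t> &dyn_input_sizes, int used_as_idx) {
  int accum_idx = 0;
  for (size_t dyn_i = 0; dyn_i < dyn_input_sizes.size(); ++dyn_i) {
    accum_idx += static_cast<int>(dyn_input_sizes[dyn_i]);  // the commit wraps this cast in LongToInt
    if (used_as_idx < accum_idx) {
      return {dyn_i, static_cast<size_t>(used_as_idx - (accum_idx - static_cast<int>(dyn_input_sizes[dyn_i])))};
    }
  }
  return {dyn_input_sizes.size(), 0};  // flat index out of range
}
```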
@@ -960,10 +959,10 @@ size_t GetCopySize(const std::vector<int64_t> &dim_offset, const std::vector<int
const std::vector<int64_t> &stop) {
for (size_t i = 0; i < start.size(); ++i) {
if (stop[i] - start[i] != 1) {
- return (stop[i] - start[i]) * dim_offset[i];
+ return SizetMulWithOverflowCheck(LongToSize(stop[i] - start[i]), LongToSize(dim_offset[i]));
}
}
- return dim_offset[start.size() - 1];
+ return LongToSize(dim_offset[start.size() - 1]);
}
std::vector<int64_t> CalDimOffset(const std::vector<int64_t> &input_shape) {
@@ -982,7 +981,7 @@ size_t CalOffset(const std::vector<int64_t> &start, const std::vector<int64_t> &
size_t size = start.size();
size_t offset = 0;
for (size_t i = 0; i < size; ++i) {
- offset += dim_offset[i] * start[i];
+ offset += SizetMulWithOverflowCheck(LongToSize(dim_offset[i]), start[i]);
if (stop[i] - start[i] != 1) {
break;
}
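Both hunks in GetCopySize and CalOffset reroute the byte-size arithmetic through checked conversions, so a negative extent or an overflowing product fails loudly instead of silently wrapping. A minimal sketch of the semantics SizetMulWithOverflowCheck is assumed to have (the real helper reports errors through MindSpore's own logging and exception macros):

```cpp
#include <cstddef>
#include <limits>
#include <stdexcept>

// Hedged sketch: multiply two size_t values, refusing to wrap on overflow.
size_t SizetMulWithOverflowCheckSketch(size_t a, size_t b) {
  if (a != 0 && b > std::numeric_limits<size_t>::max() / a) {
    throw std::overflow_error("size_t multiplication overflowed");
  }
  return a * b;
}
```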

View File

@@ -32,7 +32,7 @@ LabelSwitchKernel::LabelSwitchKernel() {
label_size_ = 0;
}
- LabelSwitchKernel::~LabelSwitchKernel() {}
+ LabelSwitchKernel::~LabelSwitchKernel() { cond_ = nullptr; }
bool LabelSwitchKernel::Init(const AnfNodePtr &anf_node) {
MS_EXCEPTION_IF_NULL(anf_node);

View File

@@ -30,6 +30,7 @@
using mindspore::ge::model_runner::MemcpyAsyncTaskInfo;
namespace mindspore {
namespace kernel {
+ constexpr auto kTensorCopySlicesInputSize = 2;
TensorCopySlices::TensorCopySlices() {}
TensorCopySlices::~TensorCopySlices() {}
@@ -102,19 +103,19 @@ void TensorCopySlices::GetInputOutputInfo(const AnfNodePtr &anf_node) {
CastShapeSizeToLong(output_shape, &output_shape_);
}
- void *TensorCopySlices::VoidPointerOffset(void *ptr, size_t offset) {
+ void *TensorCopySlices::VoidPointerOffset(void *ptr, size_t offset) const {
return reinterpret_cast<uint8_t *>(ptr) + offset;
}
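Two things are at work here: the member becomes const because it touches no object state, and the byte arithmetic stays on uint8_t * because standard C++ forbids arithmetic on void *. A free-standing sketch of the same idea:

```cpp
#include <cstddef>
#include <cstdint>

// void* has no element size, so it cannot be offset directly; casting to
// uint8_t* gives byte-granular pointer arithmetic.
void *VoidPointerOffsetSketch(void *ptr, size_t offset) {
  return static_cast<uint8_t *>(ptr) + offset;
}
```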
void TensorCopySlices::GetInputOutputTotalCount(const AnfNodePtr &anf_node) {
MS_EXCEPTION_IF_NULL(anf_node);
size_t input_size = AnfAlgo::GetInputTensorNum(anf_node);
- if (input_size != 2) {
+ if (input_size != kTensorCopySlicesInputSize) {
MS_LOG(EXCEPTION) << "TensorCopySlices input size is not 2";
}
auto input_shape = AnfAlgo::GetInputDeviceShape(anf_node, 0);
- size_t total_size = std::accumulate(input_shape.begin(), input_shape.end(), 1, std::multiplies<>());
+ size_t total_size = std::accumulate(input_shape.begin(), input_shape.end(), (size_t)1, std::multiplies<>());
total_size *= abstract::TypeIdSize(input_type_id_);
MS_LOG(INFO) << "TensorCopySlices size[" << total_size << "]";
// Shape and DType of input0 and output0 are same.
@@ -122,7 +123,7 @@ void TensorCopySlices::GetInputOutputTotalCount(const AnfNodePtr &anf_node) {
output_size_list_.emplace_back(total_size);
auto update_shape = AnfAlgo::GetInputDeviceShape(anf_node, 1);
- size_t update_size = std::accumulate(update_shape.begin(), update_shape.end(), 1, std::multiplies<>());
+ size_t update_size = std::accumulate(update_shape.begin(), update_shape.end(), (size_t)1, std::multiplies<>());
update_size *= abstract::TypeIdSize(update_type_id_);
input_size_list_.emplace_back(update_size);
}
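The (size_t)1 initial value is the substance of the two std::accumulate hunks: std::accumulate deduces its accumulator type from the initial value, so a bare 1 makes the whole shape product accumulate in int, which overflows for large tensors. A small demonstration:

```cpp
#include <cstddef>
#include <cstdint>
#include <functional>
#include <numeric>
#include <vector>

int main() {
  std::vector<int64_t> shape = {65536, 65536};  // 2^32 elements
  // Accumulates in int because the literal 1 is int -- overflows (undefined behavior) here:
  // auto bad = std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<>());
  // Accumulates in size_t; yields 4294967296 on 64-bit targets:
  auto good = std::accumulate(shape.begin(), shape.end(), (size_t)1, std::multiplies<>());
  return good == 4294967296ULL ? 0 : 1;
}
```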

View File

@@ -38,7 +38,7 @@ class TensorCopySlices : public RtKernel {
private:
void GetInputOutputInfo(const AnfNodePtr &anf_node);
void GetInputOutputTotalCount(const AnfNodePtr &anf_node);
- void *VoidPointerOffset(void *ptr, size_t offset);
+ void *VoidPointerOffset(void *ptr, size_t offset) const;
std::vector<int64_t> input_shape_;
std::vector<int64_t> update_shape_;

View File

@@ -682,10 +682,6 @@ bool TbeKernelJsonCreator::ParseAttrValue(const std::string &type, const mindspo
} else if (type == kVTypeListFloat) {
std::vector<float> attr_value;
auto value_type = value->type();
- if (!attr_obj) {
- MS_LOG(ERROR) << "attr_obj ptr is null.";
- return false;
- }
auto value_type_str = value_type->ToString();
if (value_type_str == kVTypeFloat) {
auto data = GetValue<float>(value);

View File

@@ -60,6 +60,7 @@ namespace mindspore {
namespace device {
namespace ascend {
DataDumper::~DataDumper() {
+ kernel_graph_ = nullptr;
ReleaseDevMem(&dev_load_mem_);
ReleaseDevMem(&dev_unload_mem_);
ReleaseDevMem(&op_debug_buffer_addr_);
@@ -221,7 +222,7 @@ void DataDumper::UnloadDumpInfo() {
RtLoadDumpData(op_mapping_info, &dev_unload_mem_);
}
- void DataDumper::ReleaseDevMem(void **ptr) const {
+ void DataDumper::ReleaseDevMem(void **ptr) const noexcept {
if (ptr == nullptr) {
return;
}
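Adding noexcept here documents that device-memory cleanup can never throw. That matters because the destructor above calls ReleaseDevMem: destructors are implicitly noexcept, and an exception escaping one during stack unwinding terminates the program. A hypothetical RAII holder showing the pattern (names are illustrative, not MindSpore's):

```cpp
// Cleanup called from a destructor is declared noexcept so an exception can
// never escape during stack unwinding, which would call std::terminate().
class DevBuffer {
 public:
  ~DevBuffer() { Release(&mem_); }  // implicitly noexcept; Release must not throw
  void Release(void **ptr) const noexcept {
    if (ptr == nullptr || *ptr == nullptr) {
      return;
    }
    // A real implementation would call the runtime's free here and log,
    // never throw, on failure.
    *ptr = nullptr;
  }

 private:
  void *mem_ = nullptr;
};
```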
@@ -357,7 +358,7 @@ void DataDumper::RtLoadDumpData(const aicpu::dump::OpMappingInfo &dump_info, voi
}
MS_LOG(INFO) << "[DataDump] rtDatadumpInfoLoad start";
- rt_ret = rtDatadumpInfoLoad(*ptr, proto_size);
+ rt_ret = rtDatadumpInfoLoad(*ptr, SizeToUint(proto_size));
if (rt_ret != RT_ERROR_NONE) {
MS_LOG(EXCEPTION) << "[DataDump] Call rtDatadumpInfoLoad failed";
}

View File

@@ -59,7 +59,7 @@ class DataDumper {
void OpDebugUnregister();
private:
- void ReleaseDevMem(void **ptr) const;
+ void ReleaseDevMem(void **ptr) const noexcept;
bool KernelNeedDump(const CNodePtr &kernel) const;
void SetOpMappingInfo(NotNull<aicpu::dump::OpMappingInfo *> dump_info) const;
void SetOpDebugMappingInfo(const NotNull<aicpu::dump::OpMappingInfo *> dump_info) const;

View File

@@ -53,7 +53,7 @@ void AiCpuDynamicKernel::Execute() {
MS_LOG(INFO) << "Execute AiCpuDynamicKerenl Start";
auto ret = rtCpuKernelLaunchWithFlag(
reinterpret_cast<const void *>(so_name_.c_str()), reinterpret_cast<const void *>(kernel_name_.c_str()), 1,
- reinterpret_cast<const void *>(args_.data()), args_.size(), nullptr, stream_, RT_KERNEL_DEFAULT);
+ reinterpret_cast<const void *>(args_.data()), SizeToUint(args_.size()), nullptr, stream_, RT_KERNEL_DEFAULT);
if (ret != RT_ERROR_NONE) {
MS_LOG(EXCEPTION) << "Call rtCpuKernelLaunchWithFlag Failed";
}
@@ -101,7 +101,7 @@ void AiCpuDynamicKernel::Initialize() {
}
auto aicpu_param_head = reinterpret_cast<kernel::AicpuParamHead *>(args_.data());
- aicpu_param_head->extInfoLength = ext_info_size_;
+ aicpu_param_head->extInfoLength = SizeToUint(ext_info_size_);
aicpu_param_head->extInfoAddr = reinterpret_cast<uint64_t>(ext_info_addr_dev_);
}
@@ -182,7 +182,7 @@ bool AiCpuDynamicKernel::UpdateOutputShapeFromExtInfo() {
MS_LOG(INFO) << "Get output:" << output_num_ << " Shape";
std::vector<int64_t> shape;
TypeId type_id;
- ext_info_handler_->GetOutputShapeAndType(i, NOT_NULL(&shape), NOT_NULL(&type_id));
+ ext_info_handler_->GetOutputShapeAndType(SizeToUint(i), NOT_NULL(&shape), NOT_NULL(&type_id));
for (auto x : shape) {
MS_LOG(INFO) << "Update output:" << i << " shape:" << x;

View File

@@ -164,7 +164,7 @@ bool AicpuExtInfoHandler::UpdateOutputShapeAndType(uint32_t output_index, const
for (size_t i = 0; i < shape.size(); ++i) {
if (i < max_shape.size() && shape[i] == SIZE_MAX) {
MS_LOG(INFO) << "Node:" << node_name_ << " update shape from SIZE_MAX to " << max_shape[i];
- shape[i] = max_shape[i];
+ shape[i] = LongToSize(max_shape[i]);
}
}

View File

@@ -97,7 +97,7 @@ void HcclDynamicKernel::Execute() {
op_info.outputPtr = output_ptr_;
op_info.dataType = static_cast<HcclDataType>(data_type_);
op_info.opType = static_cast<HcclReduceOp>(op_type_);
- op_info.root = root_;
+ op_info.root = IntToUint(root_);
op_info.count = count_;
auto callback = [this](HcclResult status) {

View File

@@ -132,9 +132,9 @@ void FeedTeOpConstTensor(const NotNull<CNodePtr> &cnode, const std::map<uint32_t
}
auto input_name = input_names_attr[index];
MS_LOG(INFO) << "input_name is " << input_name;
- auto type_id = AnfAlgo::GetPrevNodeOutputDeviceDataType(cnode.get(), index);
- auto shape = AnfAlgo::GetPrevNodeOutputInferShape(cnode.get(), index);
- auto format = AnfAlgo::GetPrevNodeOutputFormat(cnode.get(), index);
+ auto type_id = AnfAlgo::GetPrevNodeOutputDeviceDataType(cnode.get(), IntToSize(index));
+ auto shape = AnfAlgo::GetPrevNodeOutputInferShape(cnode.get(), IntToSize(index));
+ auto format = AnfAlgo::GetPrevNodeOutputFormat(cnode.get(), IntToSize(index));
const_inputs->try_emplace(
input_name,
optiling::TeConstTensorData{static_cast<const uint8_t *>(const_tensor->data_c()),

View File

@@ -40,6 +40,7 @@ constexpr char kIterEndNode[] = "PROFILING_ITER_END";
constexpr uint64_t kProfilingFpStartLogId = 1;
constexpr uint64_t kProfilingBpEndLogId = 2;
constexpr uint64_t kProfilingIterEndLogId = 65535;
+ constexpr auto kDouble = 2;
nlohmann::json GetContextProfilingOption() {
auto context = MsContext::GetInstance();
@@ -88,8 +89,8 @@ void ProfilingUtils::GetTraceCustomNode(ProfilingTraceInfo *trace_info) {
MS_EXCEPTION_IF_NULL(trace_info);
for (uint32_t i = 1; i <= kMaxProfilingNodeNum; ++i) {
std::string env_str = std::string(kCustomNode) + std::to_string(i);
- const char *node_full_name = std::getenv(env_str.c_str());
- if (node_full_name == nullptr) {
+ auto node_full_name = common::GetEnv(env_str);
+ if (node_full_name.empty()) {
break;
}
MS_LOG(INFO) << "Get custom profiling node:" << node_full_name;
@@ -334,7 +335,7 @@ void ProfilingUtils::InsertProfilingCustomOp(const AnfNodePtr &anf_node, const P
}
MS_LOG(INFO) << "Profiling graph:" << graph_ptr->graph_id() << " Match CustomOp:" << anf_node->fullname_with_scope();
// custom op profiling job start from 3.
- auto custom_point_id = 2 * custom_node_index_ + 1;
+ auto custom_point_id = kDouble * custom_node_index_ + 1;
ProfilingContent front_profiling_content = {false, custom_point_id, 0};
CNodePtr front_node = CreateProfilingCNodeWithStream(anf_node, front_profiling_content, graph_ptr);
kernel_list->insert(kernel_list->end() - 1, front_node);

View File

@@ -272,7 +272,6 @@ bool TaskGenerator::LaunchAllKernel(const std::vector<CNodePtr> &anf_node_list,
#ifdef ENABLE_DUMP_IR
void TaskGenerator::DumpTaskInfo(const string &real_filename,
const std::vector<TaskDebugInfoPtr> &task_debug_info_list) {
- OrderedMap<AnfNodePtr, int32_t> para_map;
ChangeFileMode(real_filename, S_IRWXU);
SaveTaskDebugInfoToFile(real_filename, task_debug_info_list);
// set file mode to read only by user
@@ -295,7 +294,6 @@ void TaskGenerator::DumpTaskInfo(const std::string &real_filename) {
}
#endif
- OrderedMap<AnfNodePtr, int32_t> para_map;
std::string path_string = real_path;
ChangeFileMode(path_string, S_IRWXU);
SaveTaskDebugInfoToFile(path_string, task_debug_info_list_);