forked from mindspore-Ecosystem/mindspore
!22315 clean code
Merge pull request !22315 from hwjiaorui/clean-code-1.3
This commit is contained in: commit 69555c4f68
@@ -916,7 +916,7 @@ std::vector<int64_t> CalDimOffset(const std::vector<int64_t> &input_shape) {
   return dim_offset;
 }
 
-size_t CalOffset(const std::vector<int64_t> &start, const std::vector<int64_t> &stop, const std::vector<int64_t> &step,
+size_t CalOffset(const std::vector<int64_t> &start, const std::vector<int64_t> &stop,
                  const std::vector<int64_t> &dim_offset) {
   size_t size = start.size();
   size_t offset = 0;
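
Note on the CalOffset change: the `step` argument was never needed to compute the starting offset of the slice, because the offset of the first element depends only on `start` and the per-dimension element strides in `dim_offset`, so the parameter is dropped here and every caller is updated in the hunks below. A minimal standalone sketch of that relationship (the names CalDimOffsetSketch/CalOffsetSketch and the row-major stride assumption are illustrative, not the MindSpore implementation, and the sketch also omits the `stop` parameter that the real signature still carries):

#include <cstddef>
#include <cstdint>
#include <vector>

// dim_offset[i] = number of elements spanned by one step along dimension i (row-major).
std::vector<int64_t> CalDimOffsetSketch(const std::vector<int64_t> &input_shape) {
  std::vector<int64_t> dim_offset(input_shape.size(), 1);
  for (size_t i = input_shape.size(); i > 1; --i) {
    dim_offset[i - 2] = dim_offset[i - 1] * input_shape[i - 1];
  }
  return dim_offset;
}

// The flat offset of the slice's first element uses only start and dim_offset;
// step never enters the computation, which is what makes the parameter removable.
size_t CalOffsetSketch(const std::vector<int64_t> &start, const std::vector<int64_t> &dim_offset) {
  size_t offset = 0;
  for (size_t i = 0; i < start.size(); ++i) {
    offset += static_cast<size_t>(start[i] * dim_offset[i]);
  }
  return offset;
}
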
@@ -137,7 +137,7 @@ inline T ComputeLerp(T top_left, T top_right, T bottom_left, T bottom_right, T x
 void CastShapeSizeToLong(const std::vector<size_t> &shape, std::vector<int64_t> *long_shape);
 void CheckSliceValid(const std::vector<int64_t> &start, const std::vector<int64_t> &stop,
                      const std::vector<int64_t> &step, const std::vector<int64_t> &input_shape);
-size_t CalOffset(const std::vector<int64_t> &start, const std::vector<int64_t> &stop, const std::vector<int64_t> &step,
+size_t CalOffset(const std::vector<int64_t> &start, const std::vector<int64_t> &stop,
                  const std::vector<int64_t> &dim_offset);
 std::vector<int64_t> CalDimOffset(const std::vector<int64_t> &input_shape);
 size_t GetCopySize(const std::vector<int64_t> &dim_offset, const std::vector<int64_t> &start,
@@ -41,7 +41,7 @@ void TensorCopySlicesCPUKernel::InitKernel(const CNodePtr &kernel_node) {
   data_type_ = AnfAlgo::GetInputDeviceDataType(kernel_node, 0);
   auto dim_offset = CalDimOffset(input_shape_);
   auto type_size = abstract::TypeIdSize(data_type_);
-  offset_ = CalOffset(begin, end, stride, dim_offset) * type_size;
+  offset_ = CalOffset(begin, end, dim_offset) * type_size;
   copy_size_ = GetCopySize(dim_offset, begin, end) * type_size;
 }
 
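
For context, offset_ and copy_size_ computed in InitKernel are byte quantities consumed later when the kernel actually copies the slice. A hypothetical illustration of that use (the buffer names and the memcpy-based copy are assumptions for illustration, not the kernel's launch code):

#include <cstddef>
#include <cstdint>
#include <cstring>

// Copy `copy_size` bytes of the update buffer into the output buffer at byte `offset`.
// `update_addr` and `output_addr` are hypothetical names, not the kernel's members.
void CopySliceSketch(const uint8_t *update_addr, uint8_t *output_addr, size_t offset, size_t copy_size) {
  std::memcpy(output_addr + offset, update_addr, copy_size);
}
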
@@ -90,7 +90,7 @@ class TensorCopySlicesGpuKernel : public GpuKernel {
 
     CheckSliceValid(begin, end, strides, input_shapes_);
     auto dim_offset = CalDimOffset(input_shapes_);
-    offset_ = CalOffset(begin, end, strides, dim_offset);
+    offset_ = CalOffset(begin, end, dim_offset);
     copy_size_ = GetCopySize(dim_offset, begin, end) * sizeof(T);
     return true;
   }
@@ -76,7 +76,7 @@ bool TensorCopySlices::Init(const mindspore::AnfNodePtr &anf_node) {
 
   CheckSliceValid(begin, end, strides, input_shape_);
   auto dim_offset = CalDimOffset(input_shape_);
-  offset_ = CalOffset(begin, end, strides, dim_offset) * abstract::TypeIdSize(input_type_id_);
+  offset_ = CalOffset(begin, end, dim_offset) * abstract::TypeIdSize(input_type_id_);
   copy_size_ = GetCopySize(dim_offset, begin, end) * abstract::TypeIdSize(input_type_id_);
   return true;
 }
@@ -527,14 +527,14 @@ class CallInfoFinder {
   // Search recursive call from a call-site.
   void SearchRecursiveCall(const KernelGraphPtr &start_caller, CallSite *start_site) {
     SearchRecursiveContext context{.start_caller = start_caller, .start_site = start_site};
-    DoSearchRecursiveCall(start_caller, start_site, &context);
+    DoSearchRecursiveCall(start_caller, *start_site, &context);
   }
 
-  void DoSearchRecursiveCall(const KernelGraphPtr &graph, CallSite *call_site, SearchRecursiveContext *ctx) {
+  void DoSearchRecursiveCall(const KernelGraphPtr &graph, const CallSite &call_site, SearchRecursiveContext *ctx) {
     // Record call path.
     ctx->call_path.push_back(graph);
     // Handle callee graphs.
-    for (auto &callee : call_site->callees) {
+    for (auto &callee : call_site.callees) {
       auto &sub_graph = callee.graph;
       if (sub_graph == ctx->start_caller) {
         // Find a recursive call path.
@@ -557,7 +557,7 @@ class CallInfoFinder {
       auto &sites = call_info.call_sites;
       for (auto &site : sites) {
         if (!site.callees.empty()) {
-          DoSearchRecursiveCall(sub_graph, &site, ctx);
+          DoSearchRecursiveCall(sub_graph, site, ctx);
         }
       }
     }
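
The two hunks above are the same clean-up applied to DoSearchRecursiveCall and its callers: a call site that is only read is now passed by const reference instead of by non-const pointer, so `->` becomes `.` inside the function and callers pass the object (or `*pointer`) rather than its address. A simplified standalone illustration of the pattern (the struct members below are placeholders, not the real CallSite/CallInfoFinder definitions):

#include <vector>

struct CalleeSketch { int graph = 0; };
struct CallSiteSketch { std::vector<CalleeSketch> callees; };

// Before: void Visit(CallSiteSketch *call_site) { for (auto &c : call_site->callees) ... }
// After: the const reference states that Visit only reads the call site and can never receive null.
void Visit(const CallSiteSketch &call_site) {
  for (const auto &callee : call_site.callees) {
    (void)callee.graph;  // read-only use of each callee
  }
}
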
@@ -523,7 +523,7 @@ void KernelRuntime::AssignCommunicationNodeOutputMem(MemType type, const AnfNode
       return;
     }
     if (context_ptr->get_param<bool>(MS_CTX_ENABLE_HCCL)) {
-      mem_size = mem_manager_->GetCommonAlignSize(mem_size);
+      mem_size = MemoryManager::GetCommonAlignSize(mem_size);
     }
     total_size += mem_size;
     align_size_list.emplace_back(mem_size);
@@ -610,7 +610,7 @@ void KernelRuntime::AssignCommunicationNodeInputMem(MemType type, const AnfNodeP
      MS_LOG(EXCEPTION) << "Communication node inputs only support CNode";
    }
    MS_EXCEPTION_IF_NULL(address);
-    auto mem_size = mem_manager_->GetCommonAlignSize(address->size());
+    auto mem_size = MemoryManager::GetCommonAlignSize(address->size());
    total_size += mem_size;
    addr_size.emplace_back(address, mem_size);
  }
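
Both hunks replace an instance-style call through mem_manager_ with a call through the class name, the usual way to invoke a static member; it makes explicit that no MemoryManager state is touched. A hedged sketch of what such an alignment helper typically looks like (the 512-byte alignment constant is an assumption for illustration, not necessarily the MindSpore value):

#include <cstddef>

class MemoryManagerSketch {
 public:
  // Round `size` up to a multiple of the common device alignment.
  static size_t GetCommonAlignSize(size_t size) {
    constexpr size_t kAlign = 512;
    return (size + kAlign - 1) / kAlign * kAlign;
  }
};

// Usage: size_t aligned = MemoryManagerSketch::GetCommonAlignSize(raw_size);
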
@@ -955,7 +955,7 @@ void KernelRuntime::GenAddrCleanLaunchArgs(const CNodePtr &cnode, AddressPtrList
 }
 
 void KernelRuntime::LaunchKernelEvent(const std::vector<std::vector<std::function<void()>>> &kernel_events,
-                                      size_t index) {
+                                      size_t index) const {
   if (index >= kernel_events.size()) {
     return;
   }
@@ -130,7 +130,7 @@ class KernelRuntime {
  private:
  void AssignStaticMemoryOutput(const session::KernelGraph *graph);
  bool LaunchKernelMod(const session::KernelGraph &graph);
-  void LaunchKernelEvent(const std::vector<std::vector<std::function<void()>>> &run_events, size_t index);
+  void LaunchKernelEvent(const std::vector<std::vector<std::function<void()>>> &run_events, size_t index) const;
  static void GenAddrCleanLaunchArgs(const CNodePtr &cnode, AddressPtrList *kernel_inputs);
  void RunOpAssignInputMemory(const std::vector<tensor::TensorPtr> &input_tensors, const session::KernelGraph *graph);
  void RunOpAssignOutputMemory(const AnfNodePtr &kernel);
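
This declaration change pairs with the definition change above: the const qualifier is part of a member function's signature, so the header and the .cc file must both carry it, otherwise the out-of-line definition would not match the declaration. A minimal self-contained illustration (simplified class name and members; the loop that runs the events is an assumption, since the diff only shows the bounds check):

#include <cstddef>
#include <functional>
#include <vector>

class RuntimeSketch {
 public:
  void LaunchKernelEvent(const std::vector<std::vector<std::function<void()>>> &events, size_t index) const;
};

void RuntimeSketch::LaunchKernelEvent(const std::vector<std::vector<std::function<void()>>> &events,
                                      size_t index) const {
  if (index >= events.size()) {
    return;  // out-of-range index: nothing to launch
  }
  for (const auto &event : events[index]) {
    event();  // invoke each recorded callback for this index
  }
}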