!34069 Delete update dynamic shape function in device context

Merge pull request !34069 from zjun/delete_update_dynamic
This commit is contained in:
i-robot 2022-05-12 11:02:20 +00:00 committed by Gitee
commit 3f9e3ea572
No known key found for this signature in database
GPG Key ID: 173E9B9CA92EEF8F
13 changed files with 27 additions and 120 deletions

View File

@ -188,7 +188,6 @@ void DynamicShapeConvertPass(const std::shared_ptr<session::KernelGraph> &kernel
dynamic_shape_convert_pm->AddPass(std::make_shared<opt::dynamic_shape::LinkCustomOp>());
optimizer->AddPassManager(dynamic_shape_convert_pm);
(void)optimizer->Optimize(kernel_graph);
kernel_graph->set_attr(kAttrHasCustomOp, MakeValue(true));
#ifdef ENABLE_DUMP_IR
if (save_graphs) {
std::string file_name =

View File

@ -570,8 +570,6 @@ constexpr auto kActualAbstract = "actual_abstract";
// TODO(dsj): for ms_function running in graph_mode; should be deleted later
constexpr auto kAttrMSFunction = "ms_function_graph";
// mark func_graph for the dynamic shape customop pass
constexpr auto kAttrHasCustomOp = "graph_has_custom_op";
// custom operator func type
constexpr auto kCustomTypeAOT = "aot";

View File

@ -39,7 +39,6 @@
#include "common/util/error_manager/error_manager.h"
#include "plugin/device/ascend/hal/device/ascend_memory_adapter.h"
#include "backend/common/optimizer/common_backend_optimization.h"
#include "backend/common/optimizer/dynamic_shape/dynamic_shape_helper.h"
#ifndef ENABLE_SECURITY
#include "debug/data_dump/dump_json_parser.h"
#include "toolchain/adx_datadump_server.h"
@ -748,20 +747,6 @@ void AscendDeviceContext::PreprocessBeforeRunSingleOpGraph(const KernelGraphPtr
LaunchDeviceLibrary();
}
// Refresh the inferred shape of a dynamic-shape kernel and resize its kernel mod
// before launch. Kernels tagged with kAttrMSFunction are skipped here; their
// shapes are maintained by the ms_function path instead.
void AscendDeviceContext::UpdateDynamicShape(const CNodePtr &kernel) const {
  MS_EXCEPTION_IF_NULL(kernel);
  if (common::AnfAlgo::GetBooleanAttr(kernel, kAttrMSFunction)) {
    return;  // ms_function kernels do not need a per-launch shape refresh.
  }
  auto kernel_mod = AnfAlgo::GetKernelMod(kernel);
  MS_EXCEPTION_IF_NULL(kernel_mod);
  opt::dynamic_shape::InferOp(kernel);
  const auto &launch_args = kernel->user_data<kernel::KernelArgs>();
  const auto resize_ret =
    kernel_mod->Resize(launch_args->op, launch_args->inputs, launch_args->outputs, launch_args->depend_tensor_map);
  if (resize_ret == kernel::KRET_RESIZE_FAILED) {
    MS_LOG(EXCEPTION) << "Node " << kernel->fullname_with_scope() << " Resize failed.";
  }
}
std::shared_ptr<Bucket> AscendDeviceContext::CreateBucket(uint32_t bucket_id, uint32_t bucket_size) const {
auto bucket = std::make_shared<AscendBucket>(bucket_id, bucket_size);
MS_EXCEPTION_IF_NULL(bucket);

View File

@ -69,9 +69,6 @@ class AscendDeviceContext : public DeviceContext {
// Adjust single op kernel graph before run graph, used in PyNative Mode.
void PreprocessBeforeRunSingleOpGraph(const KernelGraphPtr &graph) const override;
// Infer kernel shape and update abstract info for dynamic shape kernel.
void UpdateDynamicShape(const CNodePtr &kernel) const override;
// Relevant function to allocate and free device memory of raw ptr.
void *AllocateMemory(size_t size) const override;
void FreeMemory(void *ptr) const override;

View File

@ -304,34 +304,6 @@ void CPUDeviceContext::CreateKernel(const std::vector<CNodePtr> &nodes) const {
#endif
}
void CPUDeviceContext::UpdateDynamicShape(const CNodePtr &kernel) const {
MS_EXCEPTION_IF_NULL(kernel);
if (session::AnfRuntimeAlgorithm::GetKernelType(kernel) == KernelType::AKG_KERNEL) {
MS_LOG(EXCEPTION) << "Akg kernels do not support dynamic shape by now.";
}
auto kernel_mod = AnfAlgo::GetKernelMod(kernel);
MS_EXCEPTION_IF_NULL(kernel_mod);
auto func_graph = kernel->func_graph();
MS_EXCEPTION_IF_NULL(func_graph);
if (!(func_graph->has_attr(kAttrHasCustomOp) && GetValue<bool>(func_graph->get_attr(kAttrHasCustomOp)))) {
opt::dynamic_shape::InferOp(kernel);
auto args = kernel::GetArgsFromCNode(kernel);
if (kernel_mod->GetKernelModType() == kernel::KernelModType::NativeCpuKernelMod) {
auto update = kernel::AbstractArgsFromCNode(kernel);
if (args == nullptr) {
args = std::make_shared<kernel::KernelArgs>();
}
args->op = update.op;
update.depend_tensor_map = args->depend_tensor_map;
kernel::SetArgsToCNode(kernel, update);
}
if (kernel_mod->Resize(args->op, args->inputs, args->outputs, args->depend_tensor_map) ==
kernel::KRET_RESIZE_FAILED) {
MS_LOG(EXCEPTION) << "Node " << kernel->fullname_with_scope() << " Resize failed.";
}
}
}
void CPUDeviceContext::PreprocessBeforeRunGraph(const KernelGraphPtr &graph) const {
MS_EXCEPTION_IF_NULL(graph);

View File

@ -50,7 +50,6 @@ class CPUDeviceContext : public DeviceContext {
void OptimizeSingleOpGraph(const KernelGraphPtr &graph) const override;
void CreateKernel(const std::vector<CNodePtr> &nodes) const override;
void UpdateDynamicShape(const CNodePtr &kernel) const override;
void PreprocessBeforeRunGraph(const KernelGraphPtr &graph) const override;

View File

@ -442,35 +442,6 @@ void GPUDeviceContext::CreateKernel(const std::vector<CNodePtr> &nodes) const {
CreateGPUKernel(nodes);
}
void GPUDeviceContext::UpdateDynamicShape(const CNodePtr &kernel) const {
MS_EXCEPTION_IF_NULL(kernel);
if (session::AnfRuntimeAlgorithm::GetKernelType(kernel) == KernelType::AKG_KERNEL) {
MS_LOG(EXCEPTION) << "Akg kernel do not support dynamic shape: " << kernel->fullname_with_scope();
}
auto kernel_mod = AnfAlgo::GetKernelMod(kernel);
MS_EXCEPTION_IF_NULL(kernel_mod);
auto func_graph = kernel->func_graph();
MS_EXCEPTION_IF_NULL(func_graph);
if (!(func_graph->has_attr(kAttrHasCustomOp) && GetValue<bool>(func_graph->get_attr(kAttrHasCustomOp)))) {
opt::dynamic_shape::InferOp(kernel);
auto args = kernel::GetArgsFromCNode(kernel);
if (kernel_mod->GetKernelModType() == kernel::KernelModType::NativeGpuKernelMod) {
auto update = kernel::AbstractArgsFromCNode(kernel);
if (args == nullptr) {
args = std::make_shared<kernel::KernelArgs>();
}
args->op = update.op;
update.depend_tensor_map = args->depend_tensor_map;
kernel::SetArgsToCNode(kernel, update);
}
if (kernel_mod->Resize(args->op, args->inputs, args->outputs, args->depend_tensor_map) ==
kernel::KRET_RESIZE_FAILED) {
MS_LOG(EXCEPTION) << "Node " << kernel->fullname_with_scope() << " Resize failed.";
}
}
}
bool GPUDeviceContext::LaunchCustomFunc(const AnfNodePtr &kernel) const {
MS_EXCEPTION_IF_NULL(kernel);
auto custom_func = AnfUtils::GetCustomFunc(kernel);

View File

@ -56,9 +56,6 @@ class GPUDeviceContext : public DeviceContext {
void CreateKernel(const std::vector<CNodePtr> &nodes) const override;
// Infer kernel shape and update abstract info for dynamic shape kernel.
void UpdateDynamicShape(const CNodePtr &kernel) const override;
bool LaunchKernel(const CNodePtr &kernel, const std::vector<AddressPtr> &inputs,
const std::vector<AddressPtr> &workspace, const std::vector<AddressPtr> &outputs,
bool is_dynamic_shape) const override;

View File

@ -147,28 +147,6 @@ void KernelActor::FetchWorkspaceDeviceTensor() {
}
}
// Step-mode entry: record the incoming control arrow, push the host tensors as
// device inputs, and — once every expected input has arrived — allocate memory
// and launch the kernel.
void KernelActor::RunOpControlWithInputTensor(AID *const input_control, OpContext<DeviceTensor> *const context,
                                              const std::vector<TensorPtr> *input_tensors) {
  MS_EXCEPTION_IF_NULL(context);
  MS_EXCEPTION_IF_NULL(input_tensors);
  auto &seq_num = context->sequential_num_;
  (void)input_op_controls_[seq_num].emplace_back(input_control);
  PushInputDeviceTensor(input_tensors);
  // Launch only after all inputs (data and control) for this step are collected.
  if (!CheckRunningCondition(context)) {
    return;
  }
  if (is_dynamic_shape_) {
    device_contexts_[0]->UpdateDynamicShape(kernel_);
  }
  FetchOutputDeviceTensor(context);
  if (!memory_alloc_list_.empty()) {
    SendMemoryAllocReq(context);
  }
  OnMemoryAllocFinish(context);
}
namespace {
void AllocateMemory(const std::vector<DeviceTensor *> &alloc_list, const DeviceContext *device_context,
OpContext<DeviceTensor> *const context, const std::string &actor_name) {

View File

@ -70,10 +70,6 @@ class KernelActor : public DebugAwareActor {
}
~KernelActor() override = default;
// The kernel actor run when receive the input control and input tensors, used in step mode.
void RunOpControlWithInputTensor(AID *const input_control, OpContext<DeviceTensor> *const context,
const std::vector<TensorPtr> *input_tensors);
// The memory related operation interface.
void SendMemoryAllocReq(OpContext<DeviceTensor> *const context) override;
void SendMemoryFreeReq(OpContext<DeviceTensor> *const context) override;

View File

@ -523,13 +523,6 @@ void GraphScheduler::Run(ActorSet *const actor_set, const std::vector<DeviceCont
MS_EXCEPTION_IF_NULL(op_context_setter);
#endif
if ((strategy == GraphExecutionStrategy::kStep) && IsSingleOpActorSet(actor_set)) {
actor_set->data_prepare_actor_->PrepareData(input_tensors, &op_context, GraphExecutionStrategy::kStep);
MS_EXCEPTION_IF_NULL(actor_set->kernel_actors_[0]);
actor_set->kernel_actors_[0]->RunOpControlWithInputTensor(nullptr, &op_context, &input_tensors_with_value_node);
return;
}
// Trigger data prepare actor running.
MS_EXCEPTION_IF_NULL(ActorMgr::GetActorMgrRef());
auto thread_pool = ActorMgr::GetActorMgrRef()->GetActorThreadPool();

View File

@ -105,10 +105,6 @@ class DeviceContext {
virtual void PreprocessBeforeRunGraph(const KernelGraphPtr &graph) const {}
// Adjust single op kernel graph before run graph, used in PyNative Mode.
virtual void PreprocessBeforeRunSingleOpGraph(const KernelGraphPtr &graph) const {}
// Infer kernel shape and update abstract info for dynamic shape kernel.
virtual void UpdateDynamicShape(const CNodePtr &kernel) const { AnfAlgo::InferShape(kernel); }
// Whether the graph sink executing through the device capability, the default behavior is not sink and return false.
virtual bool IsExecutingSink(const KernelGraphPtr &graph) const { return false; }
// Whether the graph loop sink executing through the device capability, the default behavior is not loop sink and

View File

@ -22,6 +22,7 @@
#include <algorithm>
#include "utils/log_adapter.h"
#include "backend/common/session/anf_runtime_algorithm.h"
#include "backend/common/optimizer/dynamic_shape/dynamic_shape_helper.h"
#include "include/common/utils/convert_utils.h"
#include "runtime/device/ms_device_shape_transfer.h"
#include "runtime/pynative/op_runtime_info.h"
@ -420,6 +421,31 @@ void ChangeInputDynamicAbsToActualAbs(const CNodePtr &cnode) {
}
}
// Re-infer the output shape of a dynamic-shape kernel right before launch and let
// the kernel mod resize its internal buffers accordingly.
// Throws for AKG kernels (no dynamic-shape support) and when Resize fails.
void UpdateDynamicShape(const CNodePtr &kernel) {
  MS_EXCEPTION_IF_NULL(kernel);
  auto kernel_mod = AnfAlgo::GetKernelMod(kernel);
  MS_EXCEPTION_IF_NULL(kernel_mod);
  if (session::AnfRuntimeAlgorithm::GetKernelType(kernel) == KernelType::AKG_KERNEL) {
    MS_LOG(EXCEPTION) << "Akg kernel do not support dynamic shape: " << kernel->fullname_with_scope();
  }
  opt::dynamic_shape::InferOp(kernel);
  auto args = kernel::GetArgsFromCNode(kernel);
  // Native CPU/GPU kernel mods rebuild their op description from the refreshed abstract.
  if (kernel_mod->GetKernelModType() == kernel::KernelModType::NativeGpuKernelMod ||
      kernel_mod->GetKernelModType() == kernel::KernelModType::NativeCpuKernelMod) {
    auto update = kernel::AbstractArgsFromCNode(kernel);
    if (args == nullptr) {
      args = std::make_shared<kernel::KernelArgs>();
    }
    args->op = update.op;
    update.depend_tensor_map = args->depend_tensor_map;
    kernel::SetArgsToCNode(kernel, update);
  }
  // Fix: for kernel mod types outside the native CPU/GPU branch, GetArgsFromCNode
  // may return nullptr; the dereference below would then crash instead of raising
  // a diagnosable error.
  MS_EXCEPTION_IF_NULL(args);
  if (kernel_mod->Resize(args->op, args->inputs, args->outputs, args->depend_tensor_map) ==
      kernel::KRET_RESIZE_FAILED) {
    MS_LOG(EXCEPTION) << "Node " << kernel->fullname_with_scope() << " Resize failed.";
  }
}
// kernel_mode launch
void LaunchKernels(const KernelGraphPtr &graph, const device::DeviceContext *device_context) {
MS_EXCEPTION_IF_NULL(graph);
@ -441,7 +467,7 @@ void LaunchKernels(const KernelGraphPtr &graph, const device::DeviceContext *dev
if (is_dynamic_shape) {
ChangeInputDynamicAbsToActualAbs(node);
device_context->UpdateDynamicShape(node);
UpdateDynamicShape(node);
}
auto workspaces = CreateKernelWorkspaceAddress(runtime_info, device_context, node, is_dynamic_shape);