!29477 support EnvironDestroyAll on GPU/CPU

Merge pull request !29477 from limingqi107/bug_fix4
i-robot 2022-01-26 02:38:59 +00:00 committed by Gitee
commit 753ef2a904
6 changed files with 175 additions and 6 deletions
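
In substance, the PR adds an EnvironDestroyAll kernel for the CPU and GPU backends whose Launch simply calls EnvironMgr::GetInstance().Clear(), and it removes the implicit clearing that LoopCountActor used to do at the end of each step. For orientation, below is a minimal sketch of what a process-wide environ manager with such a Clear() entry point can look like. It is not the real mindspore::kernel::EnvironMgr (whose internals are not part of this diff); the container type, locking, and the Environ placeholder are assumptions made only for illustration.

// Minimal sketch of a global "environ" manager with a Clear() entry point.
// NOT the real mindspore::kernel::EnvironMgr; everything inside the class
// body is a placeholder for illustration.
#include <map>
#include <memory>
#include <mutex>
#include <string>

class Environ {};  // placeholder for the real environ value type

class EnvironMgrSketch {
 public:
  static EnvironMgrSketch &GetInstance() {
    static EnvironMgrSketch instance;  // one instance per process
    return instance;
  }

  // Drop every environ created while kernels were running.
  void Clear() {
    std::lock_guard<std::mutex> lock(mutex_);
    environs_.clear();
  }

 private:
  EnvironMgrSketch() = default;
  std::mutex mutex_;
  std::map<std::string, std::shared_ptr<Environ>> environs_;
};

int main() {
  // What EnvironDestroyAll's Launch boils down to in this PR.
  EnvironMgrSketch::GetInstance().Clear();
  return 0;
}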

View File

@@ -0,0 +1,42 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/cpu/environ/environ_cpu_destroy_all.h"
#include "backend/kernel_compiler/environ_manager.h"
namespace mindspore {
namespace kernel {
void EnvironDestroyAllCpuKernelMod::InitKernel(const CNodePtr &node) {
MS_EXCEPTION_IF_NULL(node);
// Check the output type.
auto output_type = AnfAlgo::GetOutputDeviceDataType(node, 0);
if (output_type != TypeId::kNumberTypeBool) {
MS_LOG(EXCEPTION) << "The output type is invalid: " << output_type;
}
output_size_list_.push_back(sizeof(bool));
}
bool EnvironDestroyAllCpuKernelMod::Launch(const std::vector<AddressPtr> &, const std::vector<AddressPtr> &,
const std::vector<AddressPtr> &) {
MS_LOG(INFO) << "Clear the global environ data.";
// Clear the global data which are generated in the kernel running.
EnvironMgr::GetInstance().Clear();
return true;
}
} // namespace kernel
} // namespace mindspore

View File

@@ -0,0 +1,40 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_ENVIRON_ENVIRON_CPU_DESTROY_ALL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_ENVIRON_ENVIRON_CPU_DESTROY_ALL_H_

#include <vector>
#include <string>
#include "backend/kernel_compiler/cpu/cpu_kernel.h"
#include "backend/kernel_compiler/cpu/cpu_kernel_factory.h"

namespace mindspore {
namespace kernel {
class EnvironDestroyAllCpuKernelMod : public NativeCpuKernelMod {
 public:
  EnvironDestroyAllCpuKernelMod() = default;
  ~EnvironDestroyAllCpuKernelMod() = default;

  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
              const std::vector<AddressPtr> &outputs) override;

  void InitKernel(const CNodePtr &node);
};

MS_REG_CPU_KERNEL(EnvironDestroyAll, KernelAttr().AddOutputAttr(kNumberTypeBool), EnvironDestroyAllCpuKernelMod);
}  // namespace kernel
}  // namespace mindspore
#endif  // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_ENVIRON_ENVIRON_CPU_DESTROY_ALL_H_
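
For orientation only: the kernel declared above has no inputs, a single bool output, and its Launch ignores the address lists it receives. A hypothetical direct call could look like the sketch below; it assumes a MindSpore build tree, and DestroyAllEnvirons is an invented helper name (in practice the kernel factory creates the object from the graph node).

// Hypothetical direct use of the new CPU kernel; not part of this PR.
#include <vector>
#include "backend/kernel_compiler/cpu/environ/environ_cpu_destroy_all.h"

namespace mindspore {
namespace kernel {
bool DestroyAllEnvirons() {
  EnvironDestroyAllCpuKernelMod kernel_mod;
  // Launch does not read its address lists, so empty vectors are sufficient.
  return kernel_mod.Launch({}, {}, {});
}
}  // namespace kernel
}  // namespace mindspore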

View File

@@ -0,0 +1,46 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/environ/environ_gpu_destroy_all.h"
#include "backend/kernel_compiler/environ_manager.h"
namespace mindspore {
namespace kernel {
bool EnvironDestroyAllGpuKernelMod::Init(const CNodePtr &kernel_node) {
MS_EXCEPTION_IF_NULL(kernel_node);
// Check the output type.
auto output_type = AnfAlgo::GetOutputDeviceDataType(kernel_node, 0);
if (output_type != TypeId::kNumberTypeBool) {
MS_LOG(ERROR) << "The output type is invalid: " << output_type;
return false;
}
InitSizeLists();
return true;
}
void EnvironDestroyAllGpuKernelMod::InitSizeLists() { output_size_list_.push_back(sizeof(bool)); }
bool EnvironDestroyAllGpuKernelMod::Launch(const std::vector<AddressPtr> &, const std::vector<AddressPtr> &,
const std::vector<AddressPtr> &, void *) {
MS_LOG(INFO) << "Clear the global environ data.";
// Clear the global data which are generated in the kernel running.
EnvironMgr::GetInstance().Clear();
return true;
}
} // namespace kernel
} // namespace mindspore

View File

@@ -0,0 +1,43 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_ENVIRON_ENVIRON_GPU_DESTROY_ALL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_ENVIRON_ENVIRON_GPU_DESTROY_ALL_H_

#include <vector>
#include <string>
#include "backend/kernel_compiler/gpu/gpu_kernel.h"
#include "backend/kernel_compiler/gpu/gpu_kernel_factory.h"

namespace mindspore {
namespace kernel {
class EnvironDestroyAllGpuKernelMod : public NativeGpuKernelMod {
 public:
  EnvironDestroyAllGpuKernelMod() = default;
  ~EnvironDestroyAllGpuKernelMod() = default;

  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
              const std::vector<AddressPtr> &outputs, void *stream_ptr) override;

  bool Init(const CNodePtr &kernel_node) override;

 protected:
  void InitSizeLists() override;
};

MS_REG_GPU_KERNEL(EnvironDestroyAll, EnvironDestroyAllGpuKernelMod)
}  // namespace kernel
}  // namespace mindspore
#endif  // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_ENVIRON_ENVIRON_GPU_DESTROY_ALL_H_

View File

@@ -1,5 +1,5 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
* Copyright 2021-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -23,7 +23,6 @@
#include "runtime/framework/actor/control_flow/entrance_actor.h"
#include "mindrt/include/async/async.h"
#include "utils/log_adapter.h"
#include "backend/kernel_compiler/environ_manager.h"

namespace mindspore {
namespace runtime {
@@ -77,9 +76,6 @@ void LoopCountActor::SendOutput(OpContext<DeviceTensor> *const context) {
    ActorDispatcher::Send(entrance_aid, &EntranceActor::ClearDataOnStepEnd, from_aid, context);
  }

  // Clear the global data which are generated in the kernel running.
  kernel::EnvironMgr::GetInstance().Clear();

  // The LoopCountActor exits.
  if (current_count_ == loop_count_) {
    current_count_ = 0;

View File

@@ -1,5 +1,5 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
* Copyright 2021-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -80,6 +80,7 @@ void MemoryManagerActor::AllocateMemory(const std::vector<DeviceTensor *> *alloc
      }
    } catch (const std::exception &e) {
      SetOpContextMemoryAllocFail(from_aid.Name(), device_context, device_tensor->GetSize(), op_context);
      return;
    }
  }
@@ -149,6 +150,7 @@ void MemoryManagerActor::AllocateBatchMemory(const std::vector<DeviceTensor *> *
      }
    } catch (const std::exception &e) {
      SetOpContextMemoryAllocFail(from_aid.Name(), device_context, device_tensor->GetSize(), op_context);
      return;
    }
  }
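
Beyond the environ change, the two hunks above add an early return after reporting a memory-allocation failure, so the allocation loop stops as soon as one request throws instead of continuing with the remaining tensors. Below is a condensed, self-contained illustration of that pattern; AllocateOne and ReportAllocFail are invented stand-ins, not MindSpore APIs.

// Condensed illustration of the early-return-on-failure pattern added here.
#include <cstdio>
#include <stdexcept>
#include <vector>

static void AllocateOne(size_t size) {
  if (size > 1024) {  // pretend the device runs out of memory
    throw std::runtime_error("device memory alloc failed");
  }
}

static void ReportAllocFail(size_t size) { std::printf("alloc of %zu bytes failed\n", size); }

static void AllocateAll(const std::vector<size_t> &sizes) {
  for (size_t size : sizes) {
    try {
      AllocateOne(size);
    } catch (const std::exception &e) {
      ReportAllocFail(size);
      return;  // the new behavior: stop allocating once one request fails
    }
  }
}

int main() {
  AllocateAll({64, 4096, 128});  // stops after the 4096-byte request fails
  return 0;
}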