From cb3d25c8f0a8b259922410f8a9da9500840c9dd5 Mon Sep 17 00:00:00 2001 From: VectorSL <864733542@qq.com> Date: Sun, 28 Nov 2021 21:38:12 +0800 Subject: [PATCH] add cpu tensor array --- .../kernel_compiler/cpu/cpu_kernel_factory.cc | 14 ++- .../cpu/rl/tensor_array_clear_kernel.cc | 53 ++++++++++ .../cpu/rl/tensor_array_clear_kernel.h | 48 +++++++++ .../cpu/rl/tensor_array_close_kernel.cc | 57 ++++++++++ .../cpu/rl/tensor_array_close_kernel.h | 48 +++++++++ .../cpu/rl/tensor_array_create_kernel.cc | 68 ++++++++++++ .../cpu/rl/tensor_array_create_kernel.h | 53 ++++++++++ .../cpu/rl/tensor_array_read_kernel.cc | 72 +++++++++++++ .../cpu/rl/tensor_array_read_kernel.h | 53 ++++++++++ .../cpu/rl/tensor_array_size_kernel.cc | 53 ++++++++++ .../cpu/rl/tensor_array_size_kernel.h | 48 +++++++++ .../cpu/rl/tensor_array_stack_kernel.cc | 100 ++++++++++++++++++ .../cpu/rl/tensor_array_stack_kernel.h | 60 +++++++++++ .../cpu/rl/tensor_array_write_kernel.cc | 87 +++++++++++++++ .../cpu/rl/tensor_array_write_kernel.h | 52 +++++++++ .../gpu/rl/tensor_array_create_kernel.cc | 2 +- .../gpu/rl/tensor_array_size_kernel.cc | 5 +- .../gpu/rl/tensor_array_stack_kernel.cc | 2 +- .../runtime/device/cpu/cpu_tensor_array.cc | 80 ++++++++++++++ .../runtime/device/cpu/cpu_tensor_array.h | 60 +++++++++++ .../runtime/device/gpu/gpu_tensor_array.cc | 32 ------ .../runtime/device/gpu/gpu_tensor_array.h | 8 -- .../ccsrc/runtime/device/tensor_array.cc | 31 ++++++ mindspore/ccsrc/runtime/device/tensor_array.h | 7 +- mindspore/ops/_op_impl/cpu/__init__.py | 7 ++ .../ops/_op_impl/cpu/tensor_array_clear.py | 29 +++++ .../ops/_op_impl/cpu/tensor_array_close.py | 29 +++++ .../ops/_op_impl/cpu/tensor_array_create.py | 28 +++++ .../ops/_op_impl/cpu/tensor_array_read.py | 48 +++++++++ .../ops/_op_impl/cpu/tensor_array_size.py | 28 +++++ .../ops/_op_impl/cpu/tensor_array_stack.py | 37 +++++++ .../ops/_op_impl/cpu/tensor_array_write.py | 49 +++++++++ tests/st/ops/cpu/test_tensor_array.py | 86 +++++++++++++++ 33 
files changed, 1379 insertions(+), 55 deletions(-) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_clear_kernel.cc create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_clear_kernel.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_close_kernel.cc create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_close_kernel.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_create_kernel.cc create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_create_kernel.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_read_kernel.cc create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_read_kernel.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_size_kernel.cc create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_size_kernel.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_stack_kernel.cc create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_stack_kernel.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_write_kernel.cc create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_write_kernel.h create mode 100644 mindspore/ccsrc/runtime/device/cpu/cpu_tensor_array.cc create mode 100644 mindspore/ccsrc/runtime/device/cpu/cpu_tensor_array.h create mode 100644 mindspore/ops/_op_impl/cpu/tensor_array_clear.py create mode 100644 mindspore/ops/_op_impl/cpu/tensor_array_close.py create mode 100644 mindspore/ops/_op_impl/cpu/tensor_array_create.py create mode 100644 mindspore/ops/_op_impl/cpu/tensor_array_read.py create mode 100644 mindspore/ops/_op_impl/cpu/tensor_array_size.py create mode 100644 mindspore/ops/_op_impl/cpu/tensor_array_stack.py create mode 100644 mindspore/ops/_op_impl/cpu/tensor_array_write.py 
create mode 100644 tests/st/ops/cpu/test_tensor_array.py diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/cpu_kernel_factory.cc b/mindspore/ccsrc/backend/kernel_compiler/cpu/cpu_kernel_factory.cc index 2ef00511fe1..2f1f77765e5 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/cpu/cpu_kernel_factory.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/cpu_kernel_factory.cc @@ -61,18 +61,16 @@ void CPUKernelFactory::SetKernelAttrs(const std::shared_ptr op_i MS_EXCEPTION_IF_NULL(op_info); auto inputs_ptr = op_info->inputs_ptr(); auto outputs_ptr = op_info->outputs_ptr(); - if (inputs_ptr.empty()) { - MS_LOG(EXCEPTION) << "op " << op_info->op_name() << " input size is zero."; + if (outputs_ptr.empty()) { + MS_LOG(EXCEPTION) << "op " << op_info->op_name() << " output size is zero."; } - auto first_input_dtypes = inputs_ptr[0]->dtypes(); - auto input_formats = inputs_ptr[0]->formats(); + auto first_output_dtypes = outputs_ptr[0]->dtypes(); - for (size_t i = 0; i < first_input_dtypes.size(); i++) { + for (size_t i = 0; i < first_output_dtypes.size(); i++) { KernelAttr kernel_attr; - (void)kernel_attr.AddInputAttr(kernel::DtypeToTypeId(first_input_dtypes[i]), input_formats[i]); - for (size_t j = 1; j < inputs_ptr.size(); j++) { + for (size_t j = 0; j < inputs_ptr.size(); j++) { auto input_dtypes = inputs_ptr[j]->dtypes(); - input_formats = inputs_ptr[j]->formats(); + auto input_formats = inputs_ptr[j]->formats(); (void)kernel_attr.AddInputAttr(kernel::DtypeToTypeId(input_dtypes[i]), input_formats[i]); } for (size_t j = 0; j < outputs_ptr.size(); j++) { diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_clear_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_clear_kernel.cc new file mode 100644 index 00000000000..faa8e3793fa --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_clear_kernel.cc @@ -0,0 +1,53 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the 
Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "backend/kernel_compiler/cpu/rl/tensor_array_clear_kernel.h" +#include "backend/kernel_compiler/common_utils.h" +#include "runtime/device/cpu/cpu_tensor_array.h" +#include "runtime/device/tensor_array_manager.h" + +namespace mindspore { +namespace kernel { +using mindspore::device::TensorArrayMgr; +using mindspore::device::cpu::CPUTensorArray; +using mindspore::device::cpu::CPUTensorArrayPtr; +TensorArrayCPUClearKernel::TensorArrayCPUClearKernel() {} + +const std::vector &TensorArrayCPUClearKernel::GetInputSizeList() const { return input_size_list_; } + +const std::vector &TensorArrayCPUClearKernel::GetOutputSizeList() const { return output_size_list_; } + +const std::vector &TensorArrayCPUClearKernel::GetWorkspaceSizeList() const { return workspace_size_list_; } + +void TensorArrayCPUClearKernel::InitKernel(const CNodePtr &kernel_node) { + MS_EXCEPTION_IF_NULL(kernel_node); + input_size_list_.push_back(sizeof(int64_t)); + output_size_list_.push_back(sizeof(int64_t)); +} + +bool TensorArrayCPUClearKernel::Launch(const std::vector &inputs, const std::vector &, + const std::vector &) { + auto handle_addr = GetDeviceAddress(inputs, 0); + MS_EXCEPTION_IF_NULL(handle_addr); + CPUTensorArrayPtr tensors_ = + std::dynamic_pointer_cast(TensorArrayMgr::GetInstance().GetTensorArray(handle_addr)); + MS_ERROR_IF_NULL(tensors_); + // Clear TensorArray valid size, but keep the memory. 
+ tensors_->Clear(); + return true; +} +} // namespace kernel +} // namespace mindspore diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_clear_kernel.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_clear_kernel.h new file mode 100644 index 00000000000..49a44963a5d --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_clear_kernel.h @@ -0,0 +1,48 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_CLEAR_KERNEL_H_ +#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_CLEAR_KERNEL_H_ + +#include +#include +#include "backend/kernel_compiler/cpu/cpu_kernel.h" +#include "backend/kernel_compiler/cpu/cpu_kernel_factory.h" + +namespace mindspore { +namespace kernel { +class TensorArrayCPUClearKernel : public CPUKernel { + public: + TensorArrayCPUClearKernel(); + ~TensorArrayCPUClearKernel() = default; + + const std::vector &GetInputSizeList() const override; + const std::vector &GetOutputSizeList() const override; + const std::vector &GetWorkspaceSizeList() const override; + bool Launch(const std::vector &inputs, const std::vector &workspace, + const std::vector &outputs) override; + void InitKernel(const CNodePtr &kernel_node) override; + + private: + std::vector input_size_list_; + std::vector output_size_list_; + std::vector workspace_size_list_; +}; + +MS_REG_CPU_KERNEL(TensorArrayClear, KernelAttr(), TensorArrayCPUClearKernel) +} // namespace kernel +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_CLEAR_KERNEL_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_close_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_close_kernel.cc new file mode 100644 index 00000000000..26fe6842d4d --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_close_kernel.cc @@ -0,0 +1,57 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "backend/kernel_compiler/cpu/rl/tensor_array_close_kernel.h" +#include "backend/kernel_compiler/common_utils.h" +#include "runtime/device/cpu/cpu_tensor_array.h" +#include "runtime/device/tensor_array_manager.h" + +namespace mindspore { +namespace kernel { +using mindspore::device::TensorArrayMgr; +using mindspore::device::cpu::CPUTensorArray; +using mindspore::device::cpu::CPUTensorArrayPtr; +TensorArrayCPUCloseKernel::TensorArrayCPUCloseKernel() {} + +const std::vector &TensorArrayCPUCloseKernel::GetInputSizeList() const { return input_size_list_; } + +const std::vector &TensorArrayCPUCloseKernel::GetOutputSizeList() const { return output_size_list_; } + +const std::vector &TensorArrayCPUCloseKernel::GetWorkspaceSizeList() const { return workspace_size_list_; } + +void TensorArrayCPUCloseKernel::InitKernel(const CNodePtr &kernel_node) { + MS_EXCEPTION_IF_NULL(kernel_node); + input_size_list_.push_back(sizeof(int64_t)); + output_size_list_.push_back(sizeof(int64_t)); +} + +bool TensorArrayCPUCloseKernel::Launch(const std::vector &inputs, const std::vector &, + const std::vector &) { + auto handle_addr = GetDeviceAddress(inputs, 0); + MS_EXCEPTION_IF_NULL(handle_addr); + CPUTensorArrayPtr tensors_ = + std::dynamic_pointer_cast(TensorArrayMgr::GetInstance().GetTensorArray(handle_addr)); + MS_ERROR_IF_NULL(tensors_); + // Free device mem + tensors_->Free(); + // Erase tensorarray + if (!TensorArrayMgr::GetInstance().EraseTensorArray(handle_addr)) { + MS_LOG(EXCEPTION) << "Free tensorarray failed"; + } + return true; +} +} // 
namespace kernel +} // namespace mindspore diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_close_kernel.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_close_kernel.h new file mode 100644 index 00000000000..b5988be04be --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_close_kernel.h @@ -0,0 +1,48 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_CLOSE_KERNEL_H_ +#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_CLOSE_KERNEL_H_ + +#include +#include +#include "backend/kernel_compiler/cpu/cpu_kernel.h" +#include "backend/kernel_compiler/cpu/cpu_kernel_factory.h" + +namespace mindspore { +namespace kernel { +class TensorArrayCPUCloseKernel : public CPUKernel { + public: + TensorArrayCPUCloseKernel(); + ~TensorArrayCPUCloseKernel() = default; + + const std::vector &GetInputSizeList() const override; + const std::vector &GetOutputSizeList() const override; + const std::vector &GetWorkspaceSizeList() const override; + bool Launch(const std::vector &inputs, const std::vector &workspace, + const std::vector &outputs) override; + void InitKernel(const CNodePtr &kernel_node) override; + + private: + std::vector input_size_list_; + std::vector output_size_list_; + std::vector workspace_size_list_; +}; + +MS_REG_CPU_KERNEL(TensorArrayClose, KernelAttr(), 
TensorArrayCPUCloseKernel) +} // namespace kernel +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_CLOSE_KERNEL_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_create_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_create_kernel.cc new file mode 100644 index 00000000000..e76e42c3801 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_create_kernel.cc @@ -0,0 +1,68 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include "backend/kernel_compiler/cpu/rl/tensor_array_create_kernel.h" +#include "backend/kernel_compiler/common_utils.h" +#include "runtime/device/cpu/cpu_tensor_array.h" +#include "runtime/device/tensor_array_manager.h" + +namespace mindspore { +namespace kernel { +using mindspore::device::TensorArrayMgr; +using mindspore::device::cpu::CPUTensorArray; +using mindspore::device::cpu::CPUTensorArrayPtr; +TensorArrayCPUCreateKernel::TensorArrayCPUCreateKernel() : is_dynamic_(true), size_(0), type_(nullptr) {} + +const std::vector &TensorArrayCPUCreateKernel::GetInputSizeList() const { return input_size_list_; } + +const std::vector &TensorArrayCPUCreateKernel::GetOutputSizeList() const { return output_size_list_; } + +const std::vector &TensorArrayCPUCreateKernel::GetWorkspaceSizeList() const { return workspace_size_list_; } + +void TensorArrayCPUCreateKernel::InitKernel(const CNodePtr &kernel_node) { + MS_EXCEPTION_IF_NULL(kernel_node); + auto shape = AnfAlgo::GetNodeAttr>(kernel_node, "element_shape"); + for (auto i : shape) { + shapes_.push_back(LongToSize(i)); + } + type_ = AnfAlgo::GetNodeAttr(kernel_node, "dtype"); + size_ = AnfAlgo::GetNodeAttr(kernel_node, "size"); + is_dynamic_ = AnfAlgo::GetNodeAttr(kernel_node, "dynamic_size"); + name_ = AnfAlgo::GetNodeAttr(kernel_node, "name"); + output_size_list_.push_back(sizeof(int64_t)); +} + +bool TensorArrayCPUCreateKernel::Launch(const std::vector &, const std::vector &, + const std::vector &outputs) { + // Create a tensorarray, and generate an unique handle. + int64_t tensor_array_handle = TensorArrayMgr::GetInstance().GetHandleCount(); + auto name = "CPUTensorArray_" + name_ + "_" + std::to_string(tensor_array_handle); + CPUTensorArrayPtr tensor_array = std::make_shared(name, type_, shapes_); + MS_EXCEPTION_IF_NULL(tensor_array); + tensor_array->SetMaxSize(size_, is_dynamic_); + auto out_addr = GetDeviceAddress(outputs, 0); + MS_EXCEPTION_IF_NULL(out_addr); + // Set handle to out_addr. 
+ out_addr[0] = tensor_array_handle; + MS_LOG(DEBUG) << "Create handle id " << tensor_array_handle; + // Put tensorarray to a saved map : map in tensorarray manager. + // Only put the device addr as the key to avoid a copy from device to host. + // The output handle address will kept and won't be reused. + TensorArrayMgr::GetInstance().AddTensorArray(out_addr, tensor_array); + return true; +} +} // namespace kernel +} // namespace mindspore diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_create_kernel.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_create_kernel.h new file mode 100644 index 00000000000..bcebed4582b --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_create_kernel.h @@ -0,0 +1,53 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_CREATE_KERNEL_H_ +#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_CREATE_KERNEL_H_ + +#include +#include +#include "backend/kernel_compiler/cpu/cpu_kernel.h" +#include "backend/kernel_compiler/cpu/cpu_kernel_factory.h" + +namespace mindspore { +namespace kernel { +class TensorArrayCPUCreateKernel : public CPUKernel { + public: + TensorArrayCPUCreateKernel(); + ~TensorArrayCPUCreateKernel() = default; + + const std::vector &GetInputSizeList() const override; + const std::vector &GetOutputSizeList() const override; + const std::vector &GetWorkspaceSizeList() const override; + bool Launch(const std::vector &inputs, const std::vector &workspace, + const std::vector &outputs) override; + void InitKernel(const CNodePtr &kernel_node) override; + + private: + bool is_dynamic_; + int64_t size_; + std::vector shapes_; + TypePtr type_; + std::string name_; + std::vector input_size_list_; + std::vector output_size_list_; + std::vector workspace_size_list_; +}; + +MS_REG_CPU_KERNEL(TensorArray, KernelAttr(), TensorArrayCPUCreateKernel) +} // namespace kernel +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_CREATE_KERNEL_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_read_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_read_kernel.cc new file mode 100644 index 00000000000..91d1d3f6b46 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_read_kernel.cc @@ -0,0 +1,72 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "backend/kernel_compiler/cpu/rl/tensor_array_read_kernel.h" +#include "backend/kernel_compiler/common_utils.h" +#include "runtime/device/cpu/cpu_tensor_array.h" +#include "runtime/device/tensor_array_manager.h" + +namespace mindspore { +namespace kernel { +using mindspore::device::TensorArrayMgr; +using mindspore::device::cpu::CPUTensorArray; +using mindspore::device::cpu::CPUTensorArrayPtr; +TensorArrayCPUReadKernel::TensorArrayCPUReadKernel() : value_size_(0), type_(nullptr) {} + +const std::vector &TensorArrayCPUReadKernel::GetInputSizeList() const { return input_size_list_; } + +const std::vector &TensorArrayCPUReadKernel::GetOutputSizeList() const { return output_size_list_; } + +const std::vector &TensorArrayCPUReadKernel::GetWorkspaceSizeList() const { return workspace_size_list_; } + +void TensorArrayCPUReadKernel::InitKernel(const CNodePtr &kernel_node) { + MS_EXCEPTION_IF_NULL(kernel_node); + shapes_ = AnfAlgo::GetNodeAttr>(kernel_node, "element_shape"); + type_ = AnfAlgo::GetNodeAttr(kernel_node, "dtype"); + value_size_ = GetTypeByte(type_); + for (auto i : shapes_) { + value_size_ *= i; + } + input_size_list_.push_back(sizeof(int64_t)); + input_size_list_.push_back(sizeof(int64_t)); + output_size_list_.push_back(value_size_); +} + +bool TensorArrayCPUReadKernel::Launch(const std::vector &inputs, const std::vector &, + const std::vector &outputs) { + auto handle_addr = GetDeviceAddress(inputs, 0); + auto index = GetDeviceAddress(inputs, 1); + auto out_value = GetDeviceAddress(outputs, 0); + 
MS_EXCEPTION_IF_NULL(handle_addr); + MS_EXCEPTION_IF_NULL(index); + MS_EXCEPTION_IF_NULL(out_value); + int64_t index_host = index[0]; + CPUTensorArrayPtr tensors_ = + std::dynamic_pointer_cast(TensorArrayMgr::GetInstance().GetTensorArray(handle_addr)); + MS_ERROR_IF_NULL(tensors_); + if (!tensors_->CheckReadIndexLogical(index_host)) { + MS_LOG(EXCEPTION) << "Invalid index " << index_host << " for read."; + } + auto value_addr = tensors_->Read(index_host); + MS_LOG(DEBUG) << "Read value index:" << index_host; + auto ret = memcpy_s(out_value, value_size_, value_addr->addr, value_size_); + if (ret != EOK) { + MS_LOG(EXCEPTION) << "Memcpy failed."; + } + return true; +} +} // namespace kernel +} // namespace mindspore diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_read_kernel.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_read_kernel.h new file mode 100644 index 00000000000..70d252f12bc --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_read_kernel.h @@ -0,0 +1,53 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_READ_KERNEL_H_ +#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_READ_KERNEL_H_ + +#include +#include +#include "backend/kernel_compiler/cpu/cpu_kernel.h" +#include "backend/kernel_compiler/cpu/cpu_kernel_factory.h" + +namespace mindspore { +namespace kernel { +class TensorArrayCPUReadKernel : public CPUKernel { + public: + TensorArrayCPUReadKernel(); + ~TensorArrayCPUReadKernel() = default; + + const std::vector &GetInputSizeList() const override; + const std::vector &GetOutputSizeList() const override; + const std::vector &GetWorkspaceSizeList() const override; + bool Launch(const std::vector &inputs, const std::vector &workspace, + const std::vector &outputs) override; + void InitKernel(const CNodePtr &kernel_node) override; + + private: + size_t value_size_; + std::vector shapes_; + TypePtr type_; + + std::vector input_size_list_; + std::vector output_size_list_; + std::vector workspace_size_list_; +}; + +MS_REG_CPU_KERNEL(TensorArrayRead, KernelAttr(), TensorArrayCPUReadKernel) +} // namespace kernel +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_READ_KERNEL_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_size_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_size_kernel.cc new file mode 100644 index 00000000000..df21b1eea23 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_size_kernel.cc @@ -0,0 +1,53 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "backend/kernel_compiler/cpu/rl/tensor_array_size_kernel.h" +#include "backend/kernel_compiler/common_utils.h" +#include "runtime/device/cpu/cpu_tensor_array.h" +#include "runtime/device/tensor_array_manager.h" + +namespace mindspore { +namespace kernel { +using mindspore::device::TensorArrayMgr; +TensorArrayCPUSizeKernel::TensorArrayCPUSizeKernel() {} + +const std::vector &TensorArrayCPUSizeKernel::GetInputSizeList() const { return input_size_list_; } + +const std::vector &TensorArrayCPUSizeKernel::GetOutputSizeList() const { return output_size_list_; } + +const std::vector &TensorArrayCPUSizeKernel::GetWorkspaceSizeList() const { return workspace_size_list_; } + +void TensorArrayCPUSizeKernel::InitKernel(const CNodePtr &kernel_node) { + MS_EXCEPTION_IF_NULL(kernel_node); + input_size_list_.push_back(sizeof(int64_t)); + output_size_list_.push_back(sizeof(int64_t)); +} + +bool TensorArrayCPUSizeKernel::Launch(const std::vector &inputs, const std::vector &, + const std::vector &outputs) { + auto handle_addr = GetDeviceAddress(inputs, 0); + auto out_addr = GetDeviceAddress(outputs, 0); + MS_EXCEPTION_IF_NULL(handle_addr); + MS_EXCEPTION_IF_NULL(out_addr); + auto tensors_ = TensorArrayMgr::GetInstance().GetTensorArray(handle_addr); + MS_ERROR_IF_NULL(tensors_); + int64_t valid_size = SizeToLong(tensors_->GetValidSize()); + out_addr[0] = valid_size; + MS_LOG(DEBUG) << "Launch TensorArraySize, valid size is " << out_addr[0]; + return true; +} +} // namespace kernel +} // namespace mindspore diff --git 
a/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_size_kernel.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_size_kernel.h new file mode 100644 index 00000000000..624e235acf8 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_size_kernel.h @@ -0,0 +1,48 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_SIZE_KERNEL_H_ +#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_SIZE_KERNEL_H_ + +#include +#include +#include "backend/kernel_compiler/cpu/cpu_kernel.h" +#include "backend/kernel_compiler/cpu/cpu_kernel_factory.h" + +namespace mindspore { +namespace kernel { +class TensorArrayCPUSizeKernel : public CPUKernel { + public: + TensorArrayCPUSizeKernel(); + ~TensorArrayCPUSizeKernel() = default; + + const std::vector &GetInputSizeList() const override; + const std::vector &GetOutputSizeList() const override; + const std::vector &GetWorkspaceSizeList() const override; + bool Launch(const std::vector &inputs, const std::vector &workspace, + const std::vector &outputs) override; + void InitKernel(const CNodePtr &kernel_node) override; + + private: + std::vector input_size_list_; + std::vector output_size_list_; + std::vector workspace_size_list_; +}; + +MS_REG_CPU_KERNEL(TensorArraySize, KernelAttr(), TensorArrayCPUSizeKernel) +} // namespace kernel +} // namespace 
mindspore + +#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_SIZE_KERNEL_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_stack_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_stack_kernel.cc new file mode 100644 index 00000000000..5fc53bb1cf2 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_stack_kernel.cc @@ -0,0 +1,100 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "backend/kernel_compiler/cpu/rl/tensor_array_stack_kernel.h" +#include +#include "backend/kernel_compiler/common_utils.h" +#include "runtime/device/cpu/cpu_tensor_array.h" +#include "runtime/device/tensor_array_manager.h" + +namespace mindspore { +namespace kernel { +using mindspore::device::TensorArrayMgr; +using mindspore::device::cpu::CPUTensorArray; +using mindspore::device::cpu::CPUTensorArrayPtr; +TensorArrayCPUStackKernel::TensorArrayCPUStackKernel() + : handle_(nullptr), value_size_(0), ele_size_(0), type_(nullptr) { + ResetResource(); +} + +const std::vector &TensorArrayCPUStackKernel::GetInputSizeList() const { return input_size_list_; } + +const std::vector &TensorArrayCPUStackKernel::GetOutputSizeList() const { return output_size_list_; } + +const std::vector &TensorArrayCPUStackKernel::GetWorkspaceSizeList() const { return workspace_size_list_; } + +void TensorArrayCPUStackKernel::InitKernel(const CNodePtr &kernel_node) { + MS_EXCEPTION_IF_NULL(kernel_node); + kernel_node_ = kernel_node; + auto shape = AnfAlgo::GetNodeAttr>(kernel_node, "element_shape"); + auto max_element = AnfAlgo::GetNodeAttr(kernel_node, "max_element"); + for (auto i : shape) { + shapes_.push_back(LongToSize(i)); + } + type_ = AnfAlgo::GetNodeAttr(kernel_node, "dtype"); + ele_size_ = GetTypeByte(type_); + for (auto i : shapes_) { + ele_size_ *= i; + } + value_size_ = ele_size_ * LongToSize(max_element); + output_size_list_.push_back(value_size_); + input_size_list_.push_back(sizeof(int64_t)); +} + +void TensorArrayCPUStackKernel::PostExecute() { + CPUTensorArrayPtr tensors_ = + std::dynamic_pointer_cast(TensorArrayMgr::GetInstance().GetTensorArray(handle_)); + MS_EXCEPTION_IF_NULL(tensors_); + size_t tensor_size = tensors_->GetValidSize(); + auto shape = shapes_; + shape.insert(shape.begin(), tensor_size); + MS_LOG(DEBUG) << "After postexecute, the real shape of TensorArrayStack is " << shape; + AnfAlgo::SetOutputInferTypeAndShape({type_->type_id()}, {shape}, 
kernel_node_.lock().get()); +} + +void TensorArrayCPUStackKernel::ResetResource() noexcept { + handle_ = nullptr; + value_size_ = 0; + ele_size_ = 0; + shapes_.clear(); + input_size_list_.clear(); + output_size_list_.clear(); + workspace_size_list_.clear(); +} + +bool TensorArrayCPUStackKernel::Launch(const std::vector &inputs, const std::vector &, + const std::vector &outputs) { + handle_ = GetDeviceAddress(inputs, 0); + auto out_value = GetDeviceAddress(outputs, 0); + MS_EXCEPTION_IF_NULL(out_value); + MS_EXCEPTION_IF_NULL(handle_); + CPUTensorArrayPtr tensors_ = + std::dynamic_pointer_cast(TensorArrayMgr::GetInstance().GetTensorArray(handle_)); + MS_EXCEPTION_IF_NULL(tensors_); + if (tensors_->GetValidSize() > tensors_->GetRealSize()) { + MS_LOG(EXCEPTION) << "Invalid TensorArray size, maybe should Clear() TensorArray before next usage."; + } + for (size_t i = 0; i < tensors_->GetValidSize(); i++) { + auto ret = memcpy_s(out_value + ele_size_ * i, ele_size_, tensors_->GetTensorAddr(i), ele_size_); + if (ret != EOK) { + MS_LOG(EXCEPTION) << "Memcpy failed."; + } + } + PostExecute(); + return true; +} +} // namespace kernel +} // namespace mindspore diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_stack_kernel.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_stack_kernel.h new file mode 100644 index 00000000000..44d6c132926 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_stack_kernel.h @@ -0,0 +1,60 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_STACK_KERNEL_H_ +#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_STACK_KERNEL_H_ + +#include +#include +#include +#include "backend/kernel_compiler/cpu/cpu_kernel.h" +#include "backend/kernel_compiler/cpu/cpu_kernel_factory.h" + +namespace mindspore { +namespace kernel { +class TensorArrayCPUStackKernel : public CPUKernel { + public: + TensorArrayCPUStackKernel(); + ~TensorArrayCPUStackKernel() = default; + + const std::vector &GetInputSizeList() const override; + const std::vector &GetOutputSizeList() const override; + const std::vector &GetWorkspaceSizeList() const override; + bool Launch(const std::vector &inputs, const std::vector &workspace, + const std::vector &outputs) override; + void InitKernel(const CNodePtr &kernel_node) override; + + protected: + void PostExecute(); + void ResetResource() noexcept; + + private: + CNodeWeakPtr kernel_node_; + int64_t *handle_; + int64_t value_size_; + int64_t ele_size_; + std::vector shapes_; + TypePtr type_; + std::vector input_size_list_; + std::vector output_size_list_; + std::vector workspace_size_list_; +}; + +MS_REG_CPU_KERNEL(TensorArrayStack, KernelAttr(), TensorArrayCPUStackKernel) +} // namespace kernel +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_STACK_KERNEL_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_write_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_write_kernel.cc new file mode 100644 
index 00000000000..7e8d63ec883 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_write_kernel.cc @@ -0,0 +1,87 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "backend/kernel_compiler/cpu/rl/tensor_array_write_kernel.h" +#include +#include "backend/kernel_compiler/common_utils.h" +#include "runtime/device/cpu/cpu_tensor_array.h" +#include "runtime/device/tensor_array_manager.h" +#include "runtime/hardware/cpu/cpu_memory_pool.h" +namespace mindspore { +namespace kernel { +constexpr size_t kSecondInputIndex = 2; +using mindspore::device::TensorArrayMgr; +using mindspore::device::cpu::CPUTensorArray; +using mindspore::device::cpu::CPUTensorArrayPtr; +TensorArrayCPUWriteKernel::TensorArrayCPUWriteKernel() : value_size_(0) {} + +const std::vector &TensorArrayCPUWriteKernel::GetInputSizeList() const { return input_size_list_; } + +const std::vector &TensorArrayCPUWriteKernel::GetOutputSizeList() const { return output_size_list_; } + +const std::vector &TensorArrayCPUWriteKernel::GetWorkspaceSizeList() const { return workspace_size_list_; } + +void TensorArrayCPUWriteKernel::InitKernel(const CNodePtr &kernel_node) { + MS_EXCEPTION_IF_NULL(kernel_node); + type_ = AnfAlgo::GetPrevNodeOutputInferDataType(kernel_node, kSecondInputIndex); + shapes_ = AnfAlgo::GetInputDeviceShape(kernel_node, kSecondInputIndex); + value_size_ = GetTypeByte(TypeIdToType(type_)); + for 
(auto i : shapes_) { + value_size_ *= i; + } + input_size_list_.push_back(sizeof(int64_t)); + input_size_list_.push_back(sizeof(int64_t)); + output_size_list_.push_back(sizeof(int64_t)); +} + +bool TensorArrayCPUWriteKernel::Launch(const std::vector &inputs, const std::vector &outputs, + const std::vector &) { + auto handle_addr = GetDeviceAddress(inputs, 0); + auto index = GetDeviceAddress(inputs, 1); + auto value = GetDeviceAddress(inputs, 2); + MS_EXCEPTION_IF_NULL(handle_addr); + MS_EXCEPTION_IF_NULL(index); + MS_EXCEPTION_IF_NULL(value); + int64_t index_host = index[0]; + CPUTensorArrayPtr tensors_ = + std::dynamic_pointer_cast(TensorArrayMgr::GetInstance().GetTensorArray(handle_addr)); + MS_EXCEPTION_IF_NULL(tensors_); + if (!tensors_->CheckValue(type_, shapes_)) { + MS_LOG(EXCEPTION) << "Invalid input data for tensor array write op."; + } + // Manage the value : create/reuse a device memory, and copy the input value to it. + AddressPtr dev_addr = std::make_shared(); + MS_EXCEPTION_IF_NULL(dev_addr); + if (tensors_->GetRealSize() > LongToSize(index_host)) { + dev_addr->addr = tensors_->Read(index_host)->addr; + } else { + dev_addr->addr = mindspore::device::cpu::CPUMemoryPool::GetInstance().AllocTensorMem(value_size_); + MS_LOG(DEBUG) << "Create tensor " << dev_addr->addr << ", size " << value_size_; + } + MS_EXCEPTION_IF_NULL(dev_addr->addr); + dev_addr->size = value_size_; + auto ret = memcpy_s(dev_addr->addr, value_size_, value, value_size_); + if (ret != EOK) { + MS_LOG(EXCEPTION) << "Memcpy failed."; + } + if (tensors_->Write(index_host, dev_addr)) { + MS_LOG(DEBUG) << "Write to tensorarry succeed, index " << index_host; + } else { + MS_LOG(EXCEPTION) << "Failed to write."; + } + return true; +} +} // namespace kernel +} // namespace mindspore diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_write_kernel.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_write_kernel.h new file mode 100644 index 
00000000000..9f236f3cc1a --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_write_kernel.h @@ -0,0 +1,52 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_WRITE_KERNEL_H_ +#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_WRITE_KERNEL_H_ + +#include +#include +#include "backend/kernel_compiler/cpu/cpu_kernel.h" +#include "backend/kernel_compiler/cpu/cpu_kernel_factory.h" + +namespace mindspore { +namespace kernel { +class TensorArrayCPUWriteKernel : public CPUKernel { + public: + TensorArrayCPUWriteKernel(); + ~TensorArrayCPUWriteKernel() = default; + + const std::vector &GetInputSizeList() const override; + const std::vector &GetOutputSizeList() const override; + const std::vector &GetWorkspaceSizeList() const override; + bool Launch(const std::vector &inputs, const std::vector &workspace, + const std::vector &outputs) override; + void InitKernel(const CNodePtr &kernel_node) override; + + private: + size_t value_size_; + std::vector shapes_; + TypeId type_; + std::vector input_size_list_; + std::vector output_size_list_; + std::vector workspace_size_list_; +}; + +MS_REG_CPU_KERNEL(TensorArrayWrite, KernelAttr(), TensorArrayCPUWriteKernel) +} // namespace kernel +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_WRITE_KERNEL_H_ diff 
--git a/mindspore/ccsrc/backend/kernel_compiler/gpu/rl/tensor_array_create_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/gpu/rl/tensor_array_create_kernel.cc index 6465aba8d64..faf72fdea43 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/gpu/rl/tensor_array_create_kernel.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/gpu/rl/tensor_array_create_kernel.cc @@ -24,7 +24,7 @@ namespace kernel { using mindspore::device::TensorArrayMgr; using mindspore::device::gpu::GPUTensorArray; using mindspore::device::gpu::GPUTensorArrayPtr; -TensorArrayCreateKernel::TensorArrayCreateKernel() : is_dynamic_(true), size_(0) {} +TensorArrayCreateKernel::TensorArrayCreateKernel() : is_dynamic_(true), size_(0), type_(nullptr) {} const std::vector &TensorArrayCreateKernel::GetInputSizeList() const { return input_size_list_; } diff --git a/mindspore/ccsrc/backend/kernel_compiler/gpu/rl/tensor_array_size_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/gpu/rl/tensor_array_size_kernel.cc index 7e39d8f4af6..48bd97027f5 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/gpu/rl/tensor_array_size_kernel.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/gpu/rl/tensor_array_size_kernel.cc @@ -22,8 +22,6 @@ namespace mindspore { namespace kernel { using mindspore::device::TensorArrayMgr; -using mindspore::device::gpu::GPUTensorArray; -using mindspore::device::gpu::GPUTensorArrayPtr; TensorArraySizeKernel::TensorArraySizeKernel() {} const std::vector &TensorArraySizeKernel::GetInputSizeList() const { return input_size_list_; } @@ -47,8 +45,7 @@ bool TensorArraySizeKernel::Launch(const std::vector &inputs, const const std::vector &outputs, void *stream_ptr) { auto handle_addr = GetDeviceAddress(inputs, 0); auto out_addr = GetDeviceAddress(outputs, 0); - GPUTensorArrayPtr tensors_ = - std::dynamic_pointer_cast(TensorArrayMgr::GetInstance().GetTensorArray(handle_addr)); + auto tensors_ = TensorArrayMgr::GetInstance().GetTensorArray(handle_addr); MS_ERROR_IF_NULL(tensors_); int64_t 
valid_size = SizeToLong(tensors_->GetValidSize()); MS_LOG(DEBUG) << "Launch TensorArraySize, valid size is " << valid_size; diff --git a/mindspore/ccsrc/backend/kernel_compiler/gpu/rl/tensor_array_stack_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/gpu/rl/tensor_array_stack_kernel.cc index 8ab723b4811..a50554715c9 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/gpu/rl/tensor_array_stack_kernel.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/gpu/rl/tensor_array_stack_kernel.cc @@ -28,7 +28,7 @@ using mindspore::device::TensorArrayMgr; using mindspore::device::gpu::GPUTensorArray; using mindspore::device::gpu::GPUTensorArrayPtr; TensorArrayStackKernel::TensorArrayStackKernel() - : handle_(nullptr), value_size_(0), ele_size_(0), stream_ptr_(nullptr) { + : handle_(nullptr), value_size_(0), ele_size_(0), stream_ptr_(nullptr), type_(nullptr) { ResetResource(); } diff --git a/mindspore/ccsrc/runtime/device/cpu/cpu_tensor_array.cc b/mindspore/ccsrc/runtime/device/cpu/cpu_tensor_array.cc new file mode 100644 index 00000000000..d34ed853659 --- /dev/null +++ b/mindspore/ccsrc/runtime/device/cpu/cpu_tensor_array.cc @@ -0,0 +1,80 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "runtime/device/cpu/cpu_tensor_array.h" +#include +#include +#include +#include "runtime/hardware/cpu/cpu_memory_pool.h" + +namespace mindspore { +namespace device { +namespace cpu { +// Add tensor to the TensorArray and increase the size. +// Case 1: is_dynamic = False and index > max_size_, error. +// Case 2: index > valid_size, fill the rest dev_value with zeros, and set valid_size to index + 1. +// Case 3: index == tensors_.size(), we need to increase both real tensors_ size and valid size, and add +// the new dev_value to tensors_. +// Case 4: tensors_.size() > index > valid_size, we can reuse the memory in tensors_[index], so +// only increase the valid_size. +bool CPUTensorArray::Write(const int64_t index, const mindspore::kernel::AddressPtr &dev_value) { + MS_LOG(DEBUG) << "Write dev_value to " << name_; + if (!is_dynamic_ && (index >= max_size_)) { + MS_LOG(ERROR) << name_ << " is not in dynamic size, the max_size is " << max_size_ << ", but get index " << index; + return false; + } + if (LongToSize(index) > valid_size_) { + // Create/reuse (index - valid_size) size dev_value with zeros. + // 1 create new mem : index > real_size ? index - real_size : 0 + // 2 reuse old mem : index > real_size ? real_size - valid_size : index - valid_size + // 3 fill zeros : index - valid_size + size_t create_size = (LongToSize(index) > tensors_.size()) ? 
(LongToSize(index) - tensors_.size()) : 0; + for (size_t i = 0; i < create_size; i++) { + kernel::AddressPtr create_dev = std::make_shared(); + create_dev->addr = CPUMemoryPool::GetInstance().AllocTensorMem(dev_value->size); + create_dev->size = dev_value->size; + tensors_.push_back(create_dev); + } + tensors_.push_back(dev_value); + // FillZeros(valid_size_, index); + for (size_t i = valid_size_; i < LongToSize(index); i++) { + auto tensor_size = tensors_[i]->size; + (void)memset_s(tensors_[i]->addr, tensor_size, 0, tensors_[i]->size); + } + valid_size_ = LongToSize(index) + 1; + } else if (LongToSize(index) == tensors_.size()) { + MS_LOG(DEBUG) << "Write to index " << index << ", increase tensors' size to " << (tensors_.size() + 1); + tensors_.push_back(dev_value); + valid_size_++; + } else { + MS_LOG(DEBUG) << "Reuse tensors in position " << index << ", tensors size is " << tensors_.size(); + if (LongToSize(index) == valid_size_) valid_size_++; + } + return true; +} + +// Free() will free the memory in TensorArray. +void CPUTensorArray::Free() { + MS_LOG(DEBUG) << "Free device memory for " << name_; + for (const auto &addr : tensors_) { + if (addr != nullptr) { + CPUMemoryPool::GetInstance().FreeTensorMem(static_cast(addr->addr)); + } + } +} +} // namespace cpu +} // namespace device +} // namespace mindspore diff --git a/mindspore/ccsrc/runtime/device/cpu/cpu_tensor_array.h b/mindspore/ccsrc/runtime/device/cpu/cpu_tensor_array.h new file mode 100644 index 00000000000..1455fc2ee27 --- /dev/null +++ b/mindspore/ccsrc/runtime/device/cpu/cpu_tensor_array.h @@ -0,0 +1,60 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CCSRC_RUNTIME_DEVICE_CPU_CPU_TENSOR_ARRAY_H_ +#define MINDSPORE_CCSRC_RUNTIME_DEVICE_CPU_CPU_TENSOR_ARRAY_H_ + +#include +#include +#include +#include "runtime/device/tensor_array.h" + +namespace mindspore { +namespace device { +namespace cpu { +class CPUTensorArray : public TensorArray { + public: + CPUTensorArray(const string &name, const TypePtr &dtype, const std::vector &shapes) + : TensorArray(name, dtype, shapes) {} + ~CPUTensorArray() override = default; + + bool Write(const int64_t index, const mindspore::kernel::AddressPtr &dev_value) override; + void Free() override; + void Clear() override { valid_size_ = 0; } + + size_t GetValidSize() const override { return valid_size_; } + size_t GetRealSize() const override { return tensors_.size(); } + + void *GetTensorAddr(const size_t &index) const { return tensors_[index]->addr; } + + void SetMaxSize(const int64_t size, const bool is_dynamic) override { + is_dynamic_ = is_dynamic; + if (!is_dynamic) { + max_size_ = size; + } + } + + private: + int64_t max_size_; + bool is_dynamic_; +}; +using CPUTensorArray = CPUTensorArray; +using CPUTensorArrayPtr = std::shared_ptr; +} // namespace cpu +} // namespace device +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_RUNTIME_DEVICE_CPU_CPU_TENSOR_ARRAY_H_ diff --git a/mindspore/ccsrc/runtime/device/gpu/gpu_tensor_array.cc b/mindspore/ccsrc/runtime/device/gpu/gpu_tensor_array.cc index 8d63691ec99..793a04d7e91 100644 --- a/mindspore/ccsrc/runtime/device/gpu/gpu_tensor_array.cc +++ 
b/mindspore/ccsrc/runtime/device/gpu/gpu_tensor_array.cc @@ -25,28 +25,6 @@ namespace mindspore { namespace device { namespace gpu { -bool GPUTensorArray::CheckValue(const TypeId &dtype, const std::vector &shape) { - MS_LOG(DEBUG) << "Check the data shape and type for " << name_; - if (dtype != dtype_->type_id()) { - MS_LOG(ERROR) << "Invalid data type " << TypeIdLabel(dtype) << " for " << name_ << ", the origin type is " - << TypeIdLabel(dtype_->type_id()); - return false; - } - if (shape != shapes_) { - MS_LOG(ERROR) << "Invalid data shape " << shape << " for " << name_ << ", the origin shape is " << shapes_; - return false; - } - return true; -} - -bool GPUTensorArray::CheckReadIndexLogical(const int64_t index) { - if (LongToSize(index) >= valid_size_) { - MS_LOG(ERROR) << "Index " << index << " out of range " << valid_size_ << ", " << name_; - return false; - } - return true; -} - // Add tensor to the TensorArray and increase the size. // Cast 1: is_dynamic = False and index > max_size_, error. // Case 2: index > valid_size, fill the rest dev_value with zeros, and set valid_size to index + 1. @@ -89,16 +67,6 @@ bool GPUTensorArray::Write(const int64_t index, const mindspore::kernel::Address } return true; } - -// Function Read() can get the tensors in the scope of tensors_. -mindspore::kernel::AddressPtr GPUTensorArray::Read(const int64_t index) { - if (LongToSize(index) >= tensors_.size()) { - MS_LOG(EXCEPTION) << "Index " << index << " out of range " << tensors_.size() << ", " << name_; - } - MS_LOG(DEBUG) << "Read tensor index = " << index << ", addr = " << tensors_[LongToSize(index)]->addr; - return tensors_[LongToSize(index)]; -} - // Free() will free the memory in TensorArray. 
void GPUTensorArray::Free() { MS_LOG(DEBUG) << "Free device memory for " << name_; diff --git a/mindspore/ccsrc/runtime/device/gpu/gpu_tensor_array.h b/mindspore/ccsrc/runtime/device/gpu/gpu_tensor_array.h index db83ab4ac7d..ae99a0e400d 100644 --- a/mindspore/ccsrc/runtime/device/gpu/gpu_tensor_array.h +++ b/mindspore/ccsrc/runtime/device/gpu/gpu_tensor_array.h @@ -32,17 +32,9 @@ class GPUTensorArray : public TensorArray { : TensorArray(name, dtype, shapes) {} ~GPUTensorArray() override = default; - // Check the dtype and shape of the input data. Used in Write(). - bool CheckValue(const TypeId &dtype, const std::vector &shape); - // Check the index in valid range. Used in Read(). - bool CheckReadIndexLogical(const int64_t index); - // Add tensor to the TensorArray and increase the size. bool Write(const int64_t index, const mindspore::kernel::AddressPtr &dev_value) override; - // Function Read() can get the tensors in the scope of tensors_. - mindspore::kernel::AddressPtr Read(const int64_t index) override; - // FreeTensorArray() will free the memory in TensorArray. 
void Free() override; diff --git a/mindspore/ccsrc/runtime/device/tensor_array.cc b/mindspore/ccsrc/runtime/device/tensor_array.cc index 8f03ebdb548..ab68063a489 100644 --- a/mindspore/ccsrc/runtime/device/tensor_array.cc +++ b/mindspore/ccsrc/runtime/device/tensor_array.cc @@ -17,6 +17,37 @@ namespace mindspore { namespace device { +bool TensorArray::CheckValue(const TypeId &dtype, const std::vector &shape) { + MS_LOG(DEBUG) << "Check the data shape and type for " << name_; + if (dtype != dtype_->type_id()) { + MS_LOG(ERROR) << "Invalid data type " << TypeIdLabel(dtype) << " for " << name_ << ", the origin type is " + << TypeIdLabel(dtype_->type_id()); + return false; + } + if (shape != shapes_) { + MS_LOG(ERROR) << "Invalid data shape " << shape << " for " << name_ << ", the origin shape is " << shapes_; + return false; + } + return true; +} + +bool TensorArray::CheckReadIndexLogical(const int64_t index) { + if (LongToSize(index) >= valid_size_) { + MS_LOG(ERROR) << "Index " << index << " out of range " << valid_size_ << ", " << name_; + return false; + } + return true; +} + +// Function Read() can get the tensors in the scope of tensors_. 
+mindspore::kernel::AddressPtr TensorArray::Read(const int64_t index) { + if (LongToSize(index) >= tensors_.size()) { + MS_LOG(EXCEPTION) << "Index " << index << " out of range " << tensors_.size() << ", " << name_; + } + MS_LOG(DEBUG) << "Read tensor index = " << index << ", addr = " << tensors_[LongToSize(index)]->addr; + return tensors_[LongToSize(index)]; +} + void TensorArray::Clear() { valid_size_ = 0; return; diff --git a/mindspore/ccsrc/runtime/device/tensor_array.h b/mindspore/ccsrc/runtime/device/tensor_array.h index 6b4f1c599df..75b8dce5557 100644 --- a/mindspore/ccsrc/runtime/device/tensor_array.h +++ b/mindspore/ccsrc/runtime/device/tensor_array.h @@ -33,11 +33,16 @@ class TensorArray { : name_(name), dtype_(dtype), shapes_(shapes), valid_size_(0) {} virtual ~TensorArray() = default; + // Check the index in valid range. Used in Read(). + virtual bool CheckReadIndexLogical(const int64_t index); + // Check the dtype and shape of the input data. Used in Write(). + virtual bool CheckValue(const TypeId &dtype, const std::vector &shape); + // Function Write() is used to insert or append dev_value to the position of index. virtual bool Write(const int64_t index, const mindspore::kernel::AddressPtr &dev_value) = 0; // Function Read() can get the tensors in the scope of tensors_. - virtual mindspore::kernel::AddressPtr Read(const int64_t index) = 0; + virtual mindspore::kernel::AddressPtr Read(const int64_t index); // Free() will free the memory in TensorArray. 
virtual void Free() = 0; diff --git a/mindspore/ops/_op_impl/cpu/__init__.py b/mindspore/ops/_op_impl/cpu/__init__.py index addd7507aab..aaa759593f0 100644 --- a/mindspore/ops/_op_impl/cpu/__init__.py +++ b/mindspore/ops/_op_impl/cpu/__init__.py @@ -74,3 +74,10 @@ from .pyfunc import _pyfunc_cpu from .buffer_append import _buffer_append_cpu from .buffer_get import _buffer_get_cpu from .buffer_sample import _buffer_sample_cpu +from .tensor_array_clear import _tensor_array_clear_cpu +from .tensor_array_close import _tensor_array_close_cpu +from .tensor_array_create import _tensor_array_create_cpu +from .tensor_array_read import _tensor_array_read_cpu +from .tensor_array_size import _tensor_array_size_cpu +from .tensor_array_stack import _tensor_array_stack_cpu +from .tensor_array_write import _tensor_array_write_cpu diff --git a/mindspore/ops/_op_impl/cpu/tensor_array_clear.py b/mindspore/ops/_op_impl/cpu/tensor_array_clear.py new file mode 100644 index 00000000000..dbe962a4637 --- /dev/null +++ b/mindspore/ops/_op_impl/cpu/tensor_array_clear.py @@ -0,0 +1,29 @@ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ + +"""TensorArrayClear op""" +from mindspore.ops.op_info_register import op_info_register, CpuRegOp, DataType + +tensor_array_clear_op_info = CpuRegOp("TensorArrayClear") \ + .input(0, "handle", "required") \ + .output(0, "y", "required") \ + .dtype_format(DataType.I64_Default, DataType.I64_Default) \ + .get_op_info() + + +@op_info_register(tensor_array_clear_op_info) +def _tensor_array_clear_cpu(): + """TensorArrayClear cpu register""" + return diff --git a/mindspore/ops/_op_impl/cpu/tensor_array_close.py b/mindspore/ops/_op_impl/cpu/tensor_array_close.py new file mode 100644 index 00000000000..f2c03787bc8 --- /dev/null +++ b/mindspore/ops/_op_impl/cpu/tensor_array_close.py @@ -0,0 +1,29 @@ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ + +"""TensorArrayClose op""" +from mindspore.ops.op_info_register import op_info_register, CpuRegOp, DataType + +tensor_array_close_op_info = CpuRegOp("TensorArrayClose") \ + .input(0, "handle", "required") \ + .output(0, "y", "required") \ + .dtype_format(DataType.I64_Default, DataType.I64_Default) \ + .get_op_info() + + +@op_info_register(tensor_array_close_op_info) +def _tensor_array_close_cpu(): + """TensorArrayClose cpu register""" + return diff --git a/mindspore/ops/_op_impl/cpu/tensor_array_create.py b/mindspore/ops/_op_impl/cpu/tensor_array_create.py new file mode 100644 index 00000000000..474b1643239 --- /dev/null +++ b/mindspore/ops/_op_impl/cpu/tensor_array_create.py @@ -0,0 +1,28 @@ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ + +"""TensorArrayCreate op""" +from mindspore.ops.op_info_register import op_info_register, CpuRegOp, DataType + +tensor_array_create_op_info = CpuRegOp("TensorArray") \ + .output(0, "handle", "required") \ + .dtype_format(DataType.I64_Default) \ + .get_op_info() + + +@op_info_register(tensor_array_create_op_info) +def _tensor_array_create_cpu(): + """TensorArrayCreate cpu register""" + return diff --git a/mindspore/ops/_op_impl/cpu/tensor_array_read.py b/mindspore/ops/_op_impl/cpu/tensor_array_read.py new file mode 100644 index 00000000000..5cad8c30222 --- /dev/null +++ b/mindspore/ops/_op_impl/cpu/tensor_array_read.py @@ -0,0 +1,48 @@ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ + +"""TensorArrayRead op""" +from mindspore.ops.op_info_register import op_info_register, CpuRegOp, DataType + +tensor_array_read_op_info = CpuRegOp("TensorArrayRead") \ + .input(0, "handle", "required") \ + .input(1, "index", "required") \ + .output(0, "y", "required") \ + .dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.I64_Default) \ + .dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.I32_Default) \ + .dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.I16_Default) \ + .dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.U32_Default) \ + .dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.U16_Default) \ + .dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.U8_Default) \ + .dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.U64_Default) \ + .dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.F16_Default) \ + .dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.F32_Default) \ + .dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.BOOL_Default) \ + .dtype_format(DataType.I32_Default, DataType.I64_Default, DataType.I64_Default) \ + .dtype_format(DataType.I32_Default, DataType.I64_Default, DataType.I32_Default) \ + .dtype_format(DataType.I32_Default, DataType.I64_Default, DataType.I16_Default) \ + .dtype_format(DataType.I32_Default, DataType.I64_Default, DataType.U32_Default) \ + .dtype_format(DataType.I32_Default, DataType.I64_Default, DataType.U16_Default) \ + .dtype_format(DataType.I32_Default, DataType.I64_Default, DataType.U8_Default) \ + .dtype_format(DataType.I32_Default, DataType.I64_Default, DataType.U64_Default) \ + .dtype_format(DataType.I32_Default, DataType.I64_Default, DataType.F16_Default) \ + .dtype_format(DataType.I32_Default, DataType.I64_Default, DataType.F32_Default) \ + .dtype_format(DataType.I32_Default, 
DataType.I64_Default, DataType.BOOL_Default) \ + .get_op_info() + +@op_info_register(tensor_array_read_op_info) +def _tensor_array_read_cpu(): + """TensorArrayRead cpu register""" + return diff --git a/mindspore/ops/_op_impl/cpu/tensor_array_size.py b/mindspore/ops/_op_impl/cpu/tensor_array_size.py new file mode 100644 index 00000000000..51b480f43fa --- /dev/null +++ b/mindspore/ops/_op_impl/cpu/tensor_array_size.py @@ -0,0 +1,28 @@ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""TensorArraySize op""" +from mindspore.ops.op_info_register import op_info_register, CpuRegOp, DataType + +tensor_array_size_op_info = CpuRegOp("TensorArraySize") \ + .input(0, "handle", "required") \ + .output(0, "y", "required") \ + .dtype_format(DataType.I64_Default, DataType.I64_Default) \ + .get_op_info() + +@op_info_register(tensor_array_size_op_info) +def _tensor_array_size_cpu(): + """TensorArraySize cpu register""" + return diff --git a/mindspore/ops/_op_impl/cpu/tensor_array_stack.py b/mindspore/ops/_op_impl/cpu/tensor_array_stack.py new file mode 100644 index 00000000000..d97bd94a658 --- /dev/null +++ b/mindspore/ops/_op_impl/cpu/tensor_array_stack.py @@ -0,0 +1,37 @@ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""TensorArrayStack op""" +from mindspore.ops.op_info_register import op_info_register, CpuRegOp, DataType + +tensor_array_stack_op_info = CpuRegOp("TensorArrayStack") \ + .input(0, "handle", "required") \ + .output(0, "y", "required") \ + .dtype_format(DataType.I64_Default, DataType.I64_Default) \ + .dtype_format(DataType.I64_Default, DataType.I32_Default) \ + .dtype_format(DataType.I64_Default, DataType.I16_Default) \ + .dtype_format(DataType.I64_Default, DataType.U32_Default) \ + .dtype_format(DataType.I64_Default, DataType.U16_Default) \ + .dtype_format(DataType.I64_Default, DataType.U8_Default) \ + .dtype_format(DataType.I64_Default, DataType.U64_Default) \ + .dtype_format(DataType.I64_Default, DataType.F16_Default) \ + .dtype_format(DataType.I64_Default, DataType.F32_Default) \ + .dtype_format(DataType.I64_Default, DataType.BOOL_Default) \ + .get_op_info() + +@op_info_register(tensor_array_stack_op_info) +def _tensor_array_stack_cpu(): + """TensorArrayStack cpu register""" + return diff --git a/mindspore/ops/_op_impl/cpu/tensor_array_write.py b/mindspore/ops/_op_impl/cpu/tensor_array_write.py new file mode 100644 index 00000000000..a7084704629 --- /dev/null +++ b/mindspore/ops/_op_impl/cpu/tensor_array_write.py @@ -0,0 +1,49 @@ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""TensorArrayWrite op""" +from mindspore.ops.op_info_register import op_info_register, CpuRegOp, DataType + +tensor_array_write_op_info = CpuRegOp("TensorArrayWrite") \ + .input(0, "handle", "required") \ + .input(1, "index", "required") \ + .input(2, "value", "required") \ + .output(0, "y", "required") \ + .dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.I64_Default, DataType.I64_Default) \ + .dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.I32_Default, DataType.I64_Default) \ + .dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.I16_Default, DataType.I64_Default) \ + .dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.U32_Default, DataType.I64_Default) \ + .dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.U16_Default, DataType.I64_Default) \ + .dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.U8_Default, DataType.I64_Default) \ + .dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.U64_Default, DataType.I64_Default) \ + .dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.F16_Default, DataType.I64_Default) \ + .dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.F32_Default, DataType.I64_Default) \ + .dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.BOOL_Default, DataType.I64_Default) \ + .dtype_format(DataType.I64_Default, DataType.I32_Default, DataType.I64_Default, DataType.I64_Default) \ + 
.dtype_format(DataType.I64_Default, DataType.I32_Default, DataType.I32_Default, DataType.I64_Default) \ + .dtype_format(DataType.I64_Default, DataType.I32_Default, DataType.I16_Default, DataType.I64_Default) \ + .dtype_format(DataType.I64_Default, DataType.I32_Default, DataType.U32_Default, DataType.I64_Default) \ + .dtype_format(DataType.I64_Default, DataType.I32_Default, DataType.U16_Default, DataType.I64_Default) \ + .dtype_format(DataType.I64_Default, DataType.I32_Default, DataType.U8_Default, DataType.I64_Default) \ + .dtype_format(DataType.I64_Default, DataType.I32_Default, DataType.U64_Default, DataType.I64_Default) \ + .dtype_format(DataType.I64_Default, DataType.I32_Default, DataType.F16_Default, DataType.I64_Default) \ + .dtype_format(DataType.I64_Default, DataType.I32_Default, DataType.F32_Default, DataType.I64_Default) \ + .dtype_format(DataType.I64_Default, DataType.I32_Default, DataType.BOOL_Default, DataType.I64_Default) \ + .get_op_info() + +@op_info_register(tensor_array_write_op_info) +def _tensor_array_write_cpu(): + """TensorArrayWrite cpu register""" + return diff --git a/tests/st/ops/cpu/test_tensor_array.py b/tests/st/ops/cpu/test_tensor_array.py new file mode 100644 index 00000000000..3c92431322e --- /dev/null +++ b/tests/st/ops/cpu/test_tensor_array.py @@ -0,0 +1,86 @@ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +import numpy as np +import pytest + +import mindspore +import mindspore.context as context +import mindspore.nn as nn +from mindspore import Tensor + +class TensorArrayNet(nn.Cell): + def __init__(self, dtype, element_shape): + super(TensorArrayNet, self).__init__() + self.ta = nn.TensorArray(dtype, element_shape) + + def construct(self, index, value): + self.ta.write(index, value) + v = self.ta.read(index) + s = self.ta.stack() + self.ta.close() + return v, s + +@pytest.mark.level0 +@pytest.mark.platform_x86_cpu +@pytest.mark.env_onecard +def test_tensorarray(): + """ + Feature: TensorArray cpu TEST. + Description: Test the functions write, read, stack, clear, close in both graph and pynative mode. + Expectation: success. + """ + context.set_context(mode=context.GRAPH_MODE, device_target="CPU") + index = Tensor(0, mindspore.int64) + value = Tensor(5, mindspore.int64) + ta = TensorArrayNet(dtype=mindspore.int64, element_shape=()) + v, s = ta(index, value) + expect_v = 5 + expect_s = [5] + assert np.allclose(s.asnumpy(), expect_s) + assert np.allclose(v.asnumpy(), expect_v) + + context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU") + ta = nn.TensorArray(mindspore.int64, ()) + for i in range(5): + ta.write(i, 99) + v = ta.read(0) + s = ta.stack() + expect_v = 99 + expect_s = [99, 99, 99, 99, 99] + assert np.allclose(s.asnumpy(), expect_s) + assert np.allclose(v.asnumpy(), expect_v) + ta_size = ta.size() + assert np.allclose(ta_size.asnumpy(), 5) + ta.clear() + ta_size = ta.size() + assert np.allclose(ta_size.asnumpy(), 0) + ta.write(0, 88) + v = ta.read(0) + s = ta.stack() + ta.close() + expect_v = 88 + expect_s = [88] + assert np.allclose(s.asnumpy(), expect_s) + assert np.allclose(v.asnumpy(), expect_v) + ta = nn.TensorArray(mindspore.float32, ()) + ta.write(5, 1.) + s = ta.stack() + expect_s = [0., 0., 0., 0., 0., 1.]
+ assert np.allclose(s.asnumpy(), expect_s) + ta.write(2, 1.) + s = ta.stack() + expect_s = [0., 0., 1., 0., 0., 1.] + assert np.allclose(s.asnumpy(), expect_s) + ta.close()