!26959 TensorArray CPU

Merge pull request !26959 from VectorSL/tensor-array-cpu
i-robot 2021-12-01 03:31:26 +00:00 committed by Gitee
commit 84d2e54871
33 changed files with 1379 additions and 55 deletions

View File

@@ -61,18 +61,16 @@ void CPUKernelFactory::SetKernelAttrs(const std::shared_ptr<kernel::OpInfo> op_i
MS_EXCEPTION_IF_NULL(op_info);
auto inputs_ptr = op_info->inputs_ptr();
auto outputs_ptr = op_info->outputs_ptr();
- if (inputs_ptr.empty()) {
-   MS_LOG(EXCEPTION) << "op " << op_info->op_name() << " input size is zero.";
+ if (outputs_ptr.empty()) {
+   MS_LOG(EXCEPTION) << "op " << op_info->op_name() << " output size is zero.";
}
- auto first_input_dtypes = inputs_ptr[0]->dtypes();
- auto input_formats = inputs_ptr[0]->formats();
- for (size_t i = 0; i < first_input_dtypes.size(); i++) {
+ auto first_output_dtypes = outputs_ptr[0]->dtypes();
+ for (size_t i = 0; i < first_output_dtypes.size(); i++) {
KernelAttr kernel_attr;
- (void)kernel_attr.AddInputAttr(kernel::DtypeToTypeId(first_input_dtypes[i]), input_formats[i]);
- for (size_t j = 1; j < inputs_ptr.size(); j++) {
+ for (size_t j = 0; j < inputs_ptr.size(); j++) {
auto input_dtypes = inputs_ptr[j]->dtypes();
- input_formats = inputs_ptr[j]->formats();
+ auto input_formats = inputs_ptr[j]->formats();
(void)kernel_attr.AddInputAttr(kernel::DtypeToTypeId(input_dtypes[i]), input_formats[i]);
}
for (size_t j = 0; j < outputs_ptr.size(); j++) {
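Note: switching the attr enumeration from the first input to the first output appears to be what lets input-less ops register here; the TensorArray create op added below produces only an int64 handle and has no inputs at all, so the old inputs_ptr.empty() check would have thrown for it.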

View File

@@ -0,0 +1,53 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/cpu/rl/tensor_array_clear_kernel.h"
#include "backend/kernel_compiler/common_utils.h"
#include "runtime/device/cpu/cpu_tensor_array.h"
#include "runtime/device/tensor_array_manager.h"
namespace mindspore {
namespace kernel {
using mindspore::device::TensorArrayMgr;
using mindspore::device::cpu::CPUTensorArray;
using mindspore::device::cpu::CPUTensorArrayPtr;
TensorArrayCPUClearKernel::TensorArrayCPUClearKernel() {}
const std::vector<size_t> &TensorArrayCPUClearKernel::GetInputSizeList() const { return input_size_list_; }
const std::vector<size_t> &TensorArrayCPUClearKernel::GetOutputSizeList() const { return output_size_list_; }
const std::vector<size_t> &TensorArrayCPUClearKernel::GetWorkspaceSizeList() const { return workspace_size_list_; }
void TensorArrayCPUClearKernel::InitKernel(const CNodePtr &kernel_node) {
MS_EXCEPTION_IF_NULL(kernel_node);
input_size_list_.push_back(sizeof(int64_t));
output_size_list_.push_back(sizeof(int64_t));
}
bool TensorArrayCPUClearKernel::Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
const std::vector<AddressPtr> &) {
auto handle_addr = GetDeviceAddress<int64_t>(inputs, 0);
MS_EXCEPTION_IF_NULL(handle_addr);
CPUTensorArrayPtr tensors_ =
std::dynamic_pointer_cast<CPUTensorArray>(TensorArrayMgr::GetInstance().GetTensorArray(handle_addr));
MS_ERROR_IF_NULL(tensors_);
// Clear TensorArray valid size, but keep the memory.
tensors_->Clear();
return true;
}
} // namespace kernel
} // namespace mindspore

View File

@@ -0,0 +1,48 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_CLEAR_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_CLEAR_KERNEL_H_
#include <vector>
#include <string>
#include "backend/kernel_compiler/cpu/cpu_kernel.h"
#include "backend/kernel_compiler/cpu/cpu_kernel_factory.h"
namespace mindspore {
namespace kernel {
class TensorArrayCPUClearKernel : public CPUKernel {
public:
TensorArrayCPUClearKernel();
~TensorArrayCPUClearKernel() = default;
const std::vector<size_t> &GetInputSizeList() const override;
const std::vector<size_t> &GetOutputSizeList() const override;
const std::vector<size_t> &GetWorkspaceSizeList() const override;
bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
const std::vector<AddressPtr> &outputs) override;
void InitKernel(const CNodePtr &kernel_node) override;
private:
std::vector<size_t> input_size_list_;
std::vector<size_t> output_size_list_;
std::vector<size_t> workspace_size_list_;
};
MS_REG_CPU_KERNEL(TensorArrayClear, KernelAttr(), TensorArrayCPUClearKernel)
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_CLEAR_KERNEL_H_

View File

@@ -0,0 +1,57 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/cpu/rl/tensor_array_close_kernel.h"
#include "backend/kernel_compiler/common_utils.h"
#include "runtime/device/cpu/cpu_tensor_array.h"
#include "runtime/device/tensor_array_manager.h"
namespace mindspore {
namespace kernel {
using mindspore::device::TensorArrayMgr;
using mindspore::device::cpu::CPUTensorArray;
using mindspore::device::cpu::CPUTensorArrayPtr;
TensorArrayCPUCloseKernel::TensorArrayCPUCloseKernel() {}
const std::vector<size_t> &TensorArrayCPUCloseKernel::GetInputSizeList() const { return input_size_list_; }
const std::vector<size_t> &TensorArrayCPUCloseKernel::GetOutputSizeList() const { return output_size_list_; }
const std::vector<size_t> &TensorArrayCPUCloseKernel::GetWorkspaceSizeList() const { return workspace_size_list_; }
void TensorArrayCPUCloseKernel::InitKernel(const CNodePtr &kernel_node) {
MS_EXCEPTION_IF_NULL(kernel_node);
input_size_list_.push_back(sizeof(int64_t));
output_size_list_.push_back(sizeof(int64_t));
}
bool TensorArrayCPUCloseKernel::Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
const std::vector<AddressPtr> &) {
auto handle_addr = GetDeviceAddress<int64_t>(inputs, 0);
MS_EXCEPTION_IF_NULL(handle_addr);
CPUTensorArrayPtr tensors_ =
std::dynamic_pointer_cast<CPUTensorArray>(TensorArrayMgr::GetInstance().GetTensorArray(handle_addr));
MS_ERROR_IF_NULL(tensors_);
// Free device mem
tensors_->Free();
// Erase tensorarray
if (!TensorArrayMgr::GetInstance().EraseTensorArray(handle_addr)) {
MS_LOG(EXCEPTION) << "Free tensorarray failed";
}
return true;
}
} // namespace kernel
} // namespace mindspore
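Note the contrast with TensorArrayClear above: Clear() only resets the valid size and keeps the allocated buffers for reuse, while Close frees every buffer and erases the TensorArray from the manager, after which the handle is no longer valid.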

View File

@@ -0,0 +1,48 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_CLOSE_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_CLOSE_KERNEL_H_
#include <vector>
#include <string>
#include "backend/kernel_compiler/cpu/cpu_kernel.h"
#include "backend/kernel_compiler/cpu/cpu_kernel_factory.h"
namespace mindspore {
namespace kernel {
class TensorArrayCPUCloseKernel : public CPUKernel {
public:
TensorArrayCPUCloseKernel();
~TensorArrayCPUCloseKernel() = default;
const std::vector<size_t> &GetInputSizeList() const override;
const std::vector<size_t> &GetOutputSizeList() const override;
const std::vector<size_t> &GetWorkspaceSizeList() const override;
bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
const std::vector<AddressPtr> &outputs) override;
void InitKernel(const CNodePtr &kernel_node) override;
private:
std::vector<size_t> input_size_list_;
std::vector<size_t> output_size_list_;
std::vector<size_t> workspace_size_list_;
};
MS_REG_CPU_KERNEL(TensorArrayClose, KernelAttr(), TensorArrayCPUCloseKernel)
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_CLOSE_KERNEL_H_

View File

@@ -0,0 +1,68 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <memory>
#include "backend/kernel_compiler/cpu/rl/tensor_array_create_kernel.h"
#include "backend/kernel_compiler/common_utils.h"
#include "runtime/device/cpu/cpu_tensor_array.h"
#include "runtime/device/tensor_array_manager.h"
namespace mindspore {
namespace kernel {
using mindspore::device::TensorArrayMgr;
using mindspore::device::cpu::CPUTensorArray;
using mindspore::device::cpu::CPUTensorArrayPtr;
TensorArrayCPUCreateKernel::TensorArrayCPUCreateKernel() : is_dynamic_(true), size_(0), type_(nullptr) {}
const std::vector<size_t> &TensorArrayCPUCreateKernel::GetInputSizeList() const { return input_size_list_; }
const std::vector<size_t> &TensorArrayCPUCreateKernel::GetOutputSizeList() const { return output_size_list_; }
const std::vector<size_t> &TensorArrayCPUCreateKernel::GetWorkspaceSizeList() const { return workspace_size_list_; }
void TensorArrayCPUCreateKernel::InitKernel(const CNodePtr &kernel_node) {
MS_EXCEPTION_IF_NULL(kernel_node);
auto shape = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(kernel_node, "element_shape");
for (auto i : shape) {
shapes_.push_back(LongToSize(i));
}
type_ = AnfAlgo::GetNodeAttr<TypePtr>(kernel_node, "dtype");
size_ = AnfAlgo::GetNodeAttr<int64_t>(kernel_node, "size");
is_dynamic_ = AnfAlgo::GetNodeAttr<bool>(kernel_node, "dynamic_size");
name_ = AnfAlgo::GetNodeAttr<std::string>(kernel_node, "name");
output_size_list_.push_back(sizeof(int64_t));
}
bool TensorArrayCPUCreateKernel::Launch(const std::vector<AddressPtr> &, const std::vector<AddressPtr> &,
const std::vector<AddressPtr> &outputs) {
// Create a tensorarray, and generate a unique handle.
int64_t tensor_array_handle = TensorArrayMgr::GetInstance().GetHandleCount();
auto name = "CPUTensorArray_" + name_ + "_" + std::to_string(tensor_array_handle);
CPUTensorArrayPtr tensor_array = std::make_shared<CPUTensorArray>(name, type_, shapes_);
MS_EXCEPTION_IF_NULL(tensor_array);
tensor_array->SetMaxSize(size_, is_dynamic_);
auto out_addr = GetDeviceAddress<int64_t>(outputs, 0);
MS_EXCEPTION_IF_NULL(out_addr);
// Set handle to out_addr.
out_addr[0] = tensor_array_handle;
MS_LOG(DEBUG) << "Create handle id " << tensor_array_handle;
// Put the tensorarray into a saved map : map<handle, tensorarray> in the tensorarray manager.
// Only the device addr is put as the key, to avoid a copy from device to host.
// The output handle address will be kept and won't be reused.
TensorArrayMgr::GetInstance().AddTensorArray(out_addr, tensor_array);
return true;
}
} // namespace kernel
} // namespace mindspore
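Because the manager map is keyed by the handle's address rather than its value, later kernels (read, write, size, clear, close) can resolve the TensorArray straight from the address of their first input; on CPU this address is plain host memory, and the same keying scheme keeps the manager uniform with the GPU path, where avoiding the device-to-host copy matters.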

View File

@@ -0,0 +1,53 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_CREATE_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_CREATE_KERNEL_H_
#include <vector>
#include <string>
#include "backend/kernel_compiler/cpu/cpu_kernel.h"
#include "backend/kernel_compiler/cpu/cpu_kernel_factory.h"
namespace mindspore {
namespace kernel {
class TensorArrayCPUCreateKernel : public CPUKernel {
public:
TensorArrayCPUCreateKernel();
~TensorArrayCPUCreateKernel() = default;
const std::vector<size_t> &GetInputSizeList() const override;
const std::vector<size_t> &GetOutputSizeList() const override;
const std::vector<size_t> &GetWorkspaceSizeList() const override;
bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
const std::vector<AddressPtr> &outputs) override;
void InitKernel(const CNodePtr &kernel_node) override;
private:
bool is_dynamic_;
int64_t size_;
std::vector<size_t> shapes_;
TypePtr type_;
std::string name_;
std::vector<size_t> input_size_list_;
std::vector<size_t> output_size_list_;
std::vector<size_t> workspace_size_list_;
};
MS_REG_CPU_KERNEL(TensorArray, KernelAttr(), TensorArrayCPUCreateKernel)
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_CREATE_KERNEL_H_

View File

@@ -0,0 +1,72 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/cpu/rl/tensor_array_read_kernel.h"
#include "backend/kernel_compiler/common_utils.h"
#include "runtime/device/cpu/cpu_tensor_array.h"
#include "runtime/device/tensor_array_manager.h"
namespace mindspore {
namespace kernel {
using mindspore::device::TensorArrayMgr;
using mindspore::device::cpu::CPUTensorArray;
using mindspore::device::cpu::CPUTensorArrayPtr;
TensorArrayCPUReadKernel::TensorArrayCPUReadKernel() : value_size_(0), type_(nullptr) {}
const std::vector<size_t> &TensorArrayCPUReadKernel::GetInputSizeList() const { return input_size_list_; }
const std::vector<size_t> &TensorArrayCPUReadKernel::GetOutputSizeList() const { return output_size_list_; }
const std::vector<size_t> &TensorArrayCPUReadKernel::GetWorkspaceSizeList() const { return workspace_size_list_; }
void TensorArrayCPUReadKernel::InitKernel(const CNodePtr &kernel_node) {
MS_EXCEPTION_IF_NULL(kernel_node);
shapes_ = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(kernel_node, "element_shape");
type_ = AnfAlgo::GetNodeAttr<TypePtr>(kernel_node, "dtype");
value_size_ = GetTypeByte(type_);
for (auto i : shapes_) {
value_size_ *= i;
}
input_size_list_.push_back(sizeof(int64_t));
input_size_list_.push_back(sizeof(int64_t));
output_size_list_.push_back(value_size_);
}
bool TensorArrayCPUReadKernel::Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
const std::vector<AddressPtr> &outputs) {
auto handle_addr = GetDeviceAddress<int64_t>(inputs, 0);
auto index = GetDeviceAddress<int64_t>(inputs, 1);
auto out_value = GetDeviceAddress<unsigned char>(outputs, 0);
MS_EXCEPTION_IF_NULL(handle_addr);
MS_EXCEPTION_IF_NULL(index);
MS_EXCEPTION_IF_NULL(out_value);
int64_t index_host = index[0];
CPUTensorArrayPtr tensors_ =
std::dynamic_pointer_cast<CPUTensorArray>(TensorArrayMgr::GetInstance().GetTensorArray(handle_addr));
MS_ERROR_IF_NULL(tensors_);
if (!tensors_->CheckReadIndexLogical(index_host)) {
MS_LOG(EXCEPTION) << "Invalid index " << index_host << " for read.";
}
auto value_addr = tensors_->Read(index_host);
MS_LOG(DEBUG) << "Read value index:" << index_host;
auto ret = memcpy_s(out_value, value_size_, value_addr->addr, value_size_);
if (ret != EOK) {
MS_LOG(EXCEPTION) << "Memcpy failed.";
}
return true;
}
} // namespace kernel
} // namespace mindspore

View File

@@ -0,0 +1,53 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_READ_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_READ_KERNEL_H_
#include <vector>
#include <string>
#include "backend/kernel_compiler/cpu/cpu_kernel.h"
#include "backend/kernel_compiler/cpu/cpu_kernel_factory.h"
namespace mindspore {
namespace kernel {
class TensorArrayCPUReadKernel : public CPUKernel {
public:
TensorArrayCPUReadKernel();
~TensorArrayCPUReadKernel() = default;
const std::vector<size_t> &GetInputSizeList() const override;
const std::vector<size_t> &GetOutputSizeList() const override;
const std::vector<size_t> &GetWorkspaceSizeList() const override;
bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
const std::vector<AddressPtr> &outputs) override;
void InitKernel(const CNodePtr &kernel_node) override;
private:
size_t value_size_;
std::vector<int64_t> shapes_;
TypePtr type_;
std::vector<size_t> input_size_list_;
std::vector<size_t> output_size_list_;
std::vector<size_t> workspace_size_list_;
};
MS_REG_CPU_KERNEL(TensorArrayRead, KernelAttr(), TensorArrayCPUReadKernel)
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_READ_KERNEL_H_

View File

@@ -0,0 +1,53 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/cpu/rl/tensor_array_size_kernel.h"
#include "backend/kernel_compiler/common_utils.h"
#include "runtime/device/cpu/cpu_tensor_array.h"
#include "runtime/device/tensor_array_manager.h"
namespace mindspore {
namespace kernel {
using mindspore::device::TensorArrayMgr;
TensorArrayCPUSizeKernel::TensorArrayCPUSizeKernel() {}
const std::vector<size_t> &TensorArrayCPUSizeKernel::GetInputSizeList() const { return input_size_list_; }
const std::vector<size_t> &TensorArrayCPUSizeKernel::GetOutputSizeList() const { return output_size_list_; }
const std::vector<size_t> &TensorArrayCPUSizeKernel::GetWorkspaceSizeList() const { return workspace_size_list_; }
void TensorArrayCPUSizeKernel::InitKernel(const CNodePtr &kernel_node) {
MS_EXCEPTION_IF_NULL(kernel_node);
input_size_list_.push_back(sizeof(int64_t));
output_size_list_.push_back(sizeof(int64_t));
}
bool TensorArrayCPUSizeKernel::Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
const std::vector<AddressPtr> &outputs) {
auto handle_addr = GetDeviceAddress<int64_t>(inputs, 0);
auto out_addr = GetDeviceAddress<int64_t>(outputs, 0);
MS_EXCEPTION_IF_NULL(handle_addr);
MS_EXCEPTION_IF_NULL(out_addr);
auto tensors_ = TensorArrayMgr::GetInstance().GetTensorArray(handle_addr);
MS_ERROR_IF_NULL(tensors_);
int64_t valid_size = SizeToLong(tensors_->GetValidSize());
out_addr[0] = valid_size;
MS_LOG(DEBUG) << "Launch TensorArraySize, valid size is " << out_addr[0];
return true;
}
} // namespace kernel
} // namespace mindspore

View File

@@ -0,0 +1,48 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_SIZE_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_SIZE_KERNEL_H_
#include <vector>
#include <string>
#include "backend/kernel_compiler/cpu/cpu_kernel.h"
#include "backend/kernel_compiler/cpu/cpu_kernel_factory.h"
namespace mindspore {
namespace kernel {
class TensorArrayCPUSizeKernel : public CPUKernel {
public:
TensorArrayCPUSizeKernel();
~TensorArrayCPUSizeKernel() = default;
const std::vector<size_t> &GetInputSizeList() const override;
const std::vector<size_t> &GetOutputSizeList() const override;
const std::vector<size_t> &GetWorkspaceSizeList() const override;
bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
const std::vector<AddressPtr> &outputs) override;
void InitKernel(const CNodePtr &kernel_node) override;
private:
std::vector<size_t> input_size_list_;
std::vector<size_t> output_size_list_;
std::vector<size_t> workspace_size_list_;
};
MS_REG_CPU_KERNEL(TensorArraySize, KernelAttr(), TensorArrayCPUSizeKernel)
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_SIZE_KERNEL_H_

View File

@@ -0,0 +1,100 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/cpu/rl/tensor_array_stack_kernel.h"
#include <algorithm>
#include "backend/kernel_compiler/common_utils.h"
#include "runtime/device/cpu/cpu_tensor_array.h"
#include "runtime/device/tensor_array_manager.h"
namespace mindspore {
namespace kernel {
using mindspore::device::TensorArrayMgr;
using mindspore::device::cpu::CPUTensorArray;
using mindspore::device::cpu::CPUTensorArrayPtr;
TensorArrayCPUStackKernel::TensorArrayCPUStackKernel()
: handle_(nullptr), value_size_(0), ele_size_(0), type_(nullptr) {
ResetResource();
}
const std::vector<size_t> &TensorArrayCPUStackKernel::GetInputSizeList() const { return input_size_list_; }
const std::vector<size_t> &TensorArrayCPUStackKernel::GetOutputSizeList() const { return output_size_list_; }
const std::vector<size_t> &TensorArrayCPUStackKernel::GetWorkspaceSizeList() const { return workspace_size_list_; }
void TensorArrayCPUStackKernel::InitKernel(const CNodePtr &kernel_node) {
MS_EXCEPTION_IF_NULL(kernel_node);
kernel_node_ = kernel_node;
auto shape = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(kernel_node, "element_shape");
auto max_element = AnfAlgo::GetNodeAttr<int64_t>(kernel_node, "max_element");
for (auto i : shape) {
shapes_.push_back(LongToSize(i));
}
type_ = AnfAlgo::GetNodeAttr<TypePtr>(kernel_node, "dtype");
ele_size_ = GetTypeByte(type_);
for (auto i : shapes_) {
ele_size_ *= i;
}
value_size_ = ele_size_ * LongToSize(max_element);
output_size_list_.push_back(value_size_);
input_size_list_.push_back(sizeof(int64_t));
}
void TensorArrayCPUStackKernel::PostExecute() {
CPUTensorArrayPtr tensors_ =
std::dynamic_pointer_cast<CPUTensorArray>(TensorArrayMgr::GetInstance().GetTensorArray(handle_));
MS_EXCEPTION_IF_NULL(tensors_);
size_t tensor_size = tensors_->GetValidSize();
auto shape = shapes_;
shape.insert(shape.begin(), tensor_size);
MS_LOG(DEBUG) << "After postexecute, the real shape of TensorArrayStack is " << shape;
AnfAlgo::SetOutputInferTypeAndShape({type_->type_id()}, {shape}, kernel_node_.lock().get());
}
void TensorArrayCPUStackKernel::ResetResource() noexcept {
handle_ = nullptr;
value_size_ = 0;
ele_size_ = 0;
shapes_.clear();
input_size_list_.clear();
output_size_list_.clear();
workspace_size_list_.clear();
}
bool TensorArrayCPUStackKernel::Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
const std::vector<AddressPtr> &outputs) {
handle_ = GetDeviceAddress<int64_t>(inputs, 0);
auto out_value = GetDeviceAddress<unsigned char>(outputs, 0);
MS_EXCEPTION_IF_NULL(out_value);
MS_EXCEPTION_IF_NULL(handle_);
CPUTensorArrayPtr tensors_ =
std::dynamic_pointer_cast<CPUTensorArray>(TensorArrayMgr::GetInstance().GetTensorArray(handle_));
MS_EXCEPTION_IF_NULL(tensors_);
if (tensors_->GetValidSize() > tensors_->GetRealSize()) {
MS_LOG(EXCEPTION) << "Invalid TensorArray size, maybe should Clear() TensorArray before next usage.";
}
for (size_t i = 0; i < tensors_->GetValidSize(); i++) {
auto ret = memcpy_s(out_value + ele_size_ * i, ele_size_, tensors_->GetTensorAddr(i), ele_size_);
if (ret != EOK) {
MS_LOG(EXCEPTION) << "Memcpy failed.";
}
}
PostExecute();
return true;
}
} // namespace kernel
} // namespace mindspore
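As a concrete check of the size bookkeeping above: with element_shape = [2, 3], dtype = float32 and max_element = 10, InitKernel computes ele_size_ = 4 * 2 * 3 = 24 bytes and reserves value_size_ = 24 * 10 = 240 bytes for the stacked output; if only four elements were ever written, Launch copies 4 * 24 bytes and PostExecute narrows the inferred output shape to (4, 2, 3).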

View File

@@ -0,0 +1,60 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_STACK_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_STACK_KERNEL_H_
#include <string>
#include <vector>
#include <atomic>
#include "backend/kernel_compiler/cpu/cpu_kernel.h"
#include "backend/kernel_compiler/cpu/cpu_kernel_factory.h"
namespace mindspore {
namespace kernel {
class TensorArrayCPUStackKernel : public CPUKernel {
public:
TensorArrayCPUStackKernel();
~TensorArrayCPUStackKernel() = default;
const std::vector<size_t> &GetInputSizeList() const override;
const std::vector<size_t> &GetOutputSizeList() const override;
const std::vector<size_t> &GetWorkspaceSizeList() const override;
bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
const std::vector<AddressPtr> &outputs) override;
void InitKernel(const CNodePtr &kernel_node) override;
protected:
void PostExecute();
void ResetResource() noexcept;
private:
CNodeWeakPtr kernel_node_;
int64_t *handle_;
int64_t value_size_;
int64_t ele_size_;
std::vector<size_t> shapes_;
TypePtr type_;
std::vector<size_t> input_size_list_;
std::vector<size_t> output_size_list_;
std::vector<size_t> workspace_size_list_;
};
MS_REG_CPU_KERNEL(TensorArrayStack, KernelAttr(), TensorArrayCPUStackKernel)
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_STACK_KERNEL_H_

View File

@@ -0,0 +1,87 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/cpu/rl/tensor_array_write_kernel.h"
#include <memory>
#include "backend/kernel_compiler/common_utils.h"
#include "runtime/device/cpu/cpu_tensor_array.h"
#include "runtime/device/tensor_array_manager.h"
#include "runtime/hardware/cpu/cpu_memory_pool.h"
namespace mindspore {
namespace kernel {
constexpr size_t kSecondInputIndex = 2;
using mindspore::device::TensorArrayMgr;
using mindspore::device::cpu::CPUTensorArray;
using mindspore::device::cpu::CPUTensorArrayPtr;
TensorArrayCPUWriteKernel::TensorArrayCPUWriteKernel() : value_size_(0) {}
const std::vector<size_t> &TensorArrayCPUWriteKernel::GetInputSizeList() const { return input_size_list_; }
const std::vector<size_t> &TensorArrayCPUWriteKernel::GetOutputSizeList() const { return output_size_list_; }
const std::vector<size_t> &TensorArrayCPUWriteKernel::GetWorkspaceSizeList() const { return workspace_size_list_; }
void TensorArrayCPUWriteKernel::InitKernel(const CNodePtr &kernel_node) {
MS_EXCEPTION_IF_NULL(kernel_node);
type_ = AnfAlgo::GetPrevNodeOutputInferDataType(kernel_node, kSecondInputIndex);
shapes_ = AnfAlgo::GetInputDeviceShape(kernel_node, kSecondInputIndex);
value_size_ = GetTypeByte(TypeIdToType(type_));
for (auto i : shapes_) {
value_size_ *= i;
}
input_size_list_.push_back(sizeof(int64_t));
input_size_list_.push_back(sizeof(int64_t));
output_size_list_.push_back(sizeof(int64_t));
}
bool TensorArrayCPUWriteKernel::Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
const std::vector<AddressPtr> &) {
auto handle_addr = GetDeviceAddress<int64_t>(inputs, 0);
auto index = GetDeviceAddress<int64_t>(inputs, 1);
auto value = GetDeviceAddress<unsigned char>(inputs, kSecondInputIndex);
MS_EXCEPTION_IF_NULL(handle_addr);
MS_EXCEPTION_IF_NULL(index);
MS_EXCEPTION_IF_NULL(value);
int64_t index_host = index[0];
CPUTensorArrayPtr tensors_ =
std::dynamic_pointer_cast<CPUTensorArray>(TensorArrayMgr::GetInstance().GetTensorArray(handle_addr));
MS_EXCEPTION_IF_NULL(tensors_);
if (!tensors_->CheckValue(type_, shapes_)) {
MS_LOG(EXCEPTION) << "Invalid input data for tensor array write op.";
}
// Manage the value : create/reuse a device memory, and copy the input value to it.
AddressPtr dev_addr = std::make_shared<kernel::Address>();
MS_EXCEPTION_IF_NULL(dev_addr);
if (tensors_->GetRealSize() > LongToSize(index_host)) {
dev_addr->addr = tensors_->Read(index_host)->addr;
} else {
dev_addr->addr = mindspore::device::cpu::CPUMemoryPool::GetInstance().AllocTensorMem(value_size_);
MS_LOG(DEBUG) << "Create tensor " << dev_addr->addr << ", size " << value_size_;
}
MS_EXCEPTION_IF_NULL(dev_addr->addr);
dev_addr->size = value_size_;
auto ret = memcpy_s(dev_addr->addr, value_size_, value, value_size_);
if (ret != EOK) {
MS_LOG(EXCEPTION) << "Memcpy failed.";
}
if (tensors_->Write(index_host, dev_addr)) {
MS_LOG(DEBUG) << "Write to tensorarry succeed, index " << index_host;
} else {
MS_LOG(EXCEPTION) << "Failed to write.";
}
return true;
}
} // namespace kernel
} // namespace mindspore
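The reuse branch above works hand in hand with CPUTensorArray::Write (added below): when the index already has backing memory, for example after a Clear(), the kernel copies the new value into the existing block and Write() only updates the valid-size bookkeeping; a fresh block is allocated from CPUMemoryPool only for indices the array has never reached.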

View File

@@ -0,0 +1,52 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_WRITE_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_WRITE_KERNEL_H_
#include <string>
#include <vector>
#include "backend/kernel_compiler/cpu/cpu_kernel.h"
#include "backend/kernel_compiler/cpu/cpu_kernel_factory.h"
namespace mindspore {
namespace kernel {
class TensorArrayCPUWriteKernel : public CPUKernel {
public:
TensorArrayCPUWriteKernel();
~TensorArrayCPUWriteKernel() = default;
const std::vector<size_t> &GetInputSizeList() const override;
const std::vector<size_t> &GetOutputSizeList() const override;
const std::vector<size_t> &GetWorkspaceSizeList() const override;
bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
const std::vector<AddressPtr> &outputs) override;
void InitKernel(const CNodePtr &kernel_node) override;
private:
size_t value_size_;
std::vector<size_t> shapes_;
TypeId type_;
std::vector<size_t> input_size_list_;
std::vector<size_t> output_size_list_;
std::vector<size_t> workspace_size_list_;
};
MS_REG_CPU_KERNEL(TensorArrayWrite, KernelAttr(), TensorArrayCPUWriteKernel)
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_TENSOR_ARRAY_WRITE_KERNEL_H_

View File

@@ -24,7 +24,7 @@ namespace kernel {
using mindspore::device::TensorArrayMgr;
using mindspore::device::gpu::GPUTensorArray;
using mindspore::device::gpu::GPUTensorArrayPtr;
- TensorArrayCreateKernel::TensorArrayCreateKernel() : is_dynamic_(true), size_(0) {}
+ TensorArrayCreateKernel::TensorArrayCreateKernel() : is_dynamic_(true), size_(0), type_(nullptr) {}
const std::vector<size_t> &TensorArrayCreateKernel::GetInputSizeList() const { return input_size_list_; }

View File

@@ -22,8 +22,6 @@
namespace mindspore {
namespace kernel {
using mindspore::device::TensorArrayMgr;
- using mindspore::device::gpu::GPUTensorArray;
- using mindspore::device::gpu::GPUTensorArrayPtr;
TensorArraySizeKernel::TensorArraySizeKernel() {}
const std::vector<size_t> &TensorArraySizeKernel::GetInputSizeList() const { return input_size_list_; }
@@ -47,8 +45,7 @@ bool TensorArraySizeKernel::Launch(const std::vector<AddressPtr> &inputs, const
const std::vector<AddressPtr> &outputs, void *stream_ptr) {
auto handle_addr = GetDeviceAddress<int64_t>(inputs, 0);
auto out_addr = GetDeviceAddress<int64_t>(outputs, 0);
- GPUTensorArrayPtr tensors_ =
-   std::dynamic_pointer_cast<GPUTensorArray>(TensorArrayMgr::GetInstance().GetTensorArray(handle_addr));
+ auto tensors_ = TensorArrayMgr::GetInstance().GetTensorArray(handle_addr);
MS_ERROR_IF_NULL(tensors_);
int64_t valid_size = SizeToLong(tensors_->GetValidSize());
MS_LOG(DEBUG) << "Launch TensorArraySize, valid size is " << valid_size;

View File

@@ -28,7 +28,7 @@ using mindspore::device::TensorArrayMgr;
using mindspore::device::gpu::GPUTensorArray;
using mindspore::device::gpu::GPUTensorArrayPtr;
TensorArrayStackKernel::TensorArrayStackKernel()
-   : handle_(nullptr), value_size_(0), ele_size_(0), stream_ptr_(nullptr) {
+   : handle_(nullptr), value_size_(0), ele_size_(0), stream_ptr_(nullptr), type_(nullptr) {
ResetResource();
}

View File

@@ -0,0 +1,80 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "runtime/device/cpu/cpu_tensor_array.h"
#include <vector>
#include <string>
#include <memory>
#include "runtime/hardware/cpu/cpu_memory_pool.h"
namespace mindspore {
namespace device {
namespace cpu {
// Add tensor to the TensorArray and increase the size.
// Case 1: is_dynamic = False and index >= max_size_, error.
// Case 2: index > valid_size_, fill the gap between valid_size_ and index with zeros, and set valid_size_
// to index + 1.
// Case 3: index == tensors_.size(), we need to increase both the real tensors_ size and the valid size, and add
// the new dev_value to tensors_.
// Case 4: index < tensors_.size() and index <= valid_size_, we can reuse the memory in tensors_[index], so
// only increase the valid_size_.
bool CPUTensorArray::Write(const int64_t index, const mindspore::kernel::AddressPtr &dev_value) {
MS_LOG(DEBUG) << "Write dev_value to " << name_;
if (!is_dynamic_ && (index >= max_size_)) {
MS_LOG(ERROR) << name_ << " is not in dynamic size, the max_size is " << max_size_ << ", but get index " << index;
return false;
}
if (LongToSize(index) > valid_size_) {
// Create/reuse (index - valid_size) size dev_value with zeros.
// 1 create new mem : index > real_size ? index - real_size : 0
// 2 reuse old mem : index > real_size ? real_size - valid_size : index - valid_size
// 3 fill zeros : index - valid_size
size_t create_size = (LongToSize(index) > tensors_.size()) ? (LongToSize(index) - tensors_.size()) : 0;
for (size_t i = 0; i < create_size; i++) {
kernel::AddressPtr create_dev = std::make_shared<kernel::Address>();
create_dev->addr = CPUMemoryPool::GetInstance().AllocTensorMem(dev_value->size);
create_dev->size = dev_value->size;
tensors_.push_back(create_dev);
}
tensors_.push_back(dev_value);
// FillZeros(valid_size_, index);
for (size_t i = valid_size_; i < LongToSize(index); i++) {
auto tensor_size = tensors_[i]->size;
(void)memset_s(tensors_[i]->addr, tensor_size, 0, tensors_[i]->size);
}
valid_size_ = LongToSize(index) + 1;
} else if (LongToSize(index) == tensors_.size()) {
MS_LOG(DEBUG) << "Write to index " << index << ", increase tensors' size to " << (tensors_.size() + 1);
tensors_.push_back(dev_value);
valid_size_++;
} else {
MS_LOG(DEBUG) << "Reuse tensors in position " << index << ", tensors size is " << tensors_.size();
if (LongToSize(index) == valid_size_) valid_size_++;
}
return true;
}
// Free() will free the memory in TensorArray.
void CPUTensorArray::Free() {
MS_LOG(DEBUG) << "Free device memory for " << name_;
for (const auto &addr : tensors_) {
if (addr != nullptr) {
CPUMemoryPool::GetInstance().FreeTensorMem(static_cast<void *>(addr->addr));
}
}
}
} // namespace cpu
} // namespace device
} // namespace mindspore
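To make the four Write() cases easier to follow, here is a minimal standalone sketch of the same bookkeeping (a hypothetical illustration, not MindSpore code), with a plain int standing in for each device buffer. It follows the commented semantics; in the real kernels the data is memcpy'd into a reused slot before Write() is called, so the reuse case only touches valid_size_.

#include <cstdio>
#include <vector>

// Toy model of the CPUTensorArray bookkeeping: 'slots' plays the role of
// tensors_ (one int per device buffer), 'valid_size' the role of valid_size_.
struct ToyTensorArray {
  std::vector<int> slots;
  size_t valid_size = 0;
  bool is_dynamic = true;
  size_t max_size = 0;

  bool Write(size_t index, int value) {
    if (!is_dynamic && index >= max_size) {
      return false;                                              // Case 1: fixed-size array exceeded.
    }
    if (index > valid_size) {                                    // Case 2: writing past the end.
      while (slots.size() <= index) slots.push_back(0);          // allocate any missing slots
      for (size_t i = valid_size; i < index; ++i) slots[i] = 0;  // zero the gap
      slots[index] = value;
      valid_size = index + 1;
    } else if (index == slots.size()) {                          // Case 3: plain append.
      slots.push_back(value);
      ++valid_size;
    } else {                                                     // Case 4: reuse an existing slot.
      slots[index] = value;
      if (index == valid_size) ++valid_size;
    }
    return true;
  }
};

int main() {
  ToyTensorArray ta;
  ta.Write(0, 7);     // Case 3: append, valid_size -> 1
  ta.Write(3, 9);     // Case 2: slots 1..2 zeroed, valid_size -> 4
  std::printf("real=%zu valid=%zu\n", ta.slots.size(), ta.valid_size);  // real=4 valid=4
  ta.valid_size = 0;  // Clear(): memory kept, logical size reset
  ta.Write(0, 1);     // Case 4: slot 0 reused, valid_size -> 1
  std::printf("real=%zu valid=%zu\n", ta.slots.size(), ta.valid_size);  // real=4 valid=1
  return 0;
}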

View File

@@ -0,0 +1,60 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_RUNTIME_DEVICE_CPU_CPU_TENSOR_ARRAY_H_
#define MINDSPORE_CCSRC_RUNTIME_DEVICE_CPU_CPU_TENSOR_ARRAY_H_
#include <vector>
#include <string>
#include <memory>
#include "runtime/device/tensor_array.h"
namespace mindspore {
namespace device {
namespace cpu {
class CPUTensorArray : public TensorArray {
public:
CPUTensorArray(const string &name, const TypePtr &dtype, const std::vector<size_t> &shapes)
: TensorArray(name, dtype, shapes) {}
~CPUTensorArray() override = default;
bool Write(const int64_t index, const mindspore::kernel::AddressPtr &dev_value) override;
void Free() override;
void Clear() override { valid_size_ = 0; }
size_t GetValidSize() const override { return valid_size_; }
size_t GetRealSize() const override { return tensors_.size(); }
void *GetTensorAddr(const size_t &index) const { return tensors_[index]->addr; }
void SetMaxSize(const int64_t size, const bool is_dynamic) override {
is_dynamic_ = is_dynamic;
if (!is_dynamic) {
max_size_ = size;
}
}
private:
int64_t max_size_ = 0;
bool is_dynamic_ = true;
};
using CPUTensorArrayPtr = std::shared_ptr<CPUTensorArray>;
} // namespace cpu
} // namespace device
} // namespace mindspore
#endif // MINDSPORE_CCSRC_RUNTIME_DEVICE_CPU_CPU_TENSOR_ARRAY_H_

View File

@@ -25,28 +25,6 @@
namespace mindspore {
namespace device {
namespace gpu {
- bool GPUTensorArray::CheckValue(const TypeId &dtype, const std::vector<size_t> &shape) {
-   MS_LOG(DEBUG) << "Check the data shape and type for " << name_;
-   if (dtype != dtype_->type_id()) {
-     MS_LOG(ERROR) << "Invalid data type " << TypeIdLabel(dtype) << " for " << name_ << ", the origin type is "
-                   << TypeIdLabel(dtype_->type_id());
-     return false;
-   }
-   if (shape != shapes_) {
-     MS_LOG(ERROR) << "Invalid data shape " << shape << " for " << name_ << ", the origin shape is " << shapes_;
-     return false;
-   }
-   return true;
- }
- bool GPUTensorArray::CheckReadIndexLogical(const int64_t index) {
-   if (LongToSize(index) >= valid_size_) {
-     MS_LOG(ERROR) << "Index " << index << " out of range " << valid_size_ << ", " << name_;
-     return false;
-   }
-   return true;
- }
// Add tensor to the TensorArray and increase the size.
// Case 1: is_dynamic = False and index > max_size_, error.
// Case 2: index > valid_size, fill the rest dev_value with zeros, and set valid_size to index + 1.
@@ -89,16 +67,6 @@ bool GPUTensorArray::Write(const int64_t index, const mindspore::kernel::Address
}
return true;
}
- // Function Read() can get the tensors in the scope of tensors_.
- mindspore::kernel::AddressPtr GPUTensorArray::Read(const int64_t index) {
-   if (LongToSize(index) >= tensors_.size()) {
-     MS_LOG(EXCEPTION) << "Index " << index << " out of range " << tensors_.size() << ", " << name_;
-   }
-   MS_LOG(DEBUG) << "Read tensor index = " << index << ", addr = " << tensors_[LongToSize(index)]->addr;
-   return tensors_[LongToSize(index)];
- }
// Free() will free the memory in TensorArray.
void GPUTensorArray::Free() {
MS_LOG(DEBUG) << "Free device memory for " << name_;

View File

@@ -32,17 +32,9 @@ class GPUTensorArray : public TensorArray {
: TensorArray(name, dtype, shapes) {}
~GPUTensorArray() override = default;
- // Check the dtype and shape of the input data. Used in Write().
- bool CheckValue(const TypeId &dtype, const std::vector<size_t> &shape);
- // Check the index in valid range. Used in Read().
- bool CheckReadIndexLogical(const int64_t index);
// Add tensor to the TensorArray and increase the size.
bool Write(const int64_t index, const mindspore::kernel::AddressPtr &dev_value) override;
- // Function Read() can get the tensors in the scope of tensors_.
- mindspore::kernel::AddressPtr Read(const int64_t index) override;
- // FreeTensorArray() will free the memory in TensorArray.
+ // Free() will free the memory in TensorArray.
void Free() override;

View File

@@ -17,6 +17,37 @@
namespace mindspore {
namespace device {
+ bool TensorArray::CheckValue(const TypeId &dtype, const std::vector<size_t> &shape) {
+   MS_LOG(DEBUG) << "Check the data shape and type for " << name_;
+   if (dtype != dtype_->type_id()) {
+     MS_LOG(ERROR) << "Invalid data type " << TypeIdLabel(dtype) << " for " << name_ << ", the origin type is "
+                   << TypeIdLabel(dtype_->type_id());
+     return false;
+   }
+   if (shape != shapes_) {
+     MS_LOG(ERROR) << "Invalid data shape " << shape << " for " << name_ << ", the origin shape is " << shapes_;
+     return false;
+   }
+   return true;
+ }
+ bool TensorArray::CheckReadIndexLogical(const int64_t index) {
+   if (LongToSize(index) >= valid_size_) {
+     MS_LOG(ERROR) << "Index " << index << " out of range " << valid_size_ << ", " << name_;
+     return false;
+   }
+   return true;
+ }
+ // Function Read() can get the tensors in the scope of tensors_.
+ mindspore::kernel::AddressPtr TensorArray::Read(const int64_t index) {
+   if (LongToSize(index) >= tensors_.size()) {
+     MS_LOG(EXCEPTION) << "Index " << index << " out of range " << tensors_.size() << ", " << name_;
+   }
+   MS_LOG(DEBUG) << "Read tensor index = " << index << ", addr = " << tensors_[LongToSize(index)]->addr;
+   return tensors_[LongToSize(index)];
+ }
void TensorArray::Clear() {
valid_size_ = 0;
return;

View File

@@ -33,11 +33,16 @@ class TensorArray {
: name_(name), dtype_(dtype), shapes_(shapes), valid_size_(0) {}
virtual ~TensorArray() = default;
+ // Check the index in valid range. Used in Read().
+ virtual bool CheckReadIndexLogical(const int64_t index);
+ // Check the dtype and shape of the input data. Used in Write().
+ virtual bool CheckValue(const TypeId &dtype, const std::vector<size_t> &shape);
// Function Write() is used to insert or append dev_value to the position of index.
virtual bool Write(const int64_t index, const mindspore::kernel::AddressPtr &dev_value) = 0;
// Function Read() can get the tensors in the scope of tensors_.
- virtual mindspore::kernel::AddressPtr Read(const int64_t index) = 0;
+ virtual mindspore::kernel::AddressPtr Read(const int64_t index);
// Free() will free the memory in TensorArray.
virtual void Free() = 0;

View File

@@ -74,3 +74,10 @@ from .pyfunc import _pyfunc_cpu
from .buffer_append import _buffer_append_cpu
from .buffer_get import _buffer_get_cpu
from .buffer_sample import _buffer_sample_cpu
+ from .tensor_array_clear import _tensor_array_clear_cpu
+ from .tensor_array_close import _tensor_array_close_cpu
+ from .tensor_array_create import _tensor_array_create_cpu
+ from .tensor_array_read import _tensor_array_read_cpu
+ from .tensor_array_size import _tensor_array_size_cpu
+ from .tensor_array_stack import _tensor_array_stack_cpu
+ from .tensor_array_write import _tensor_array_write_cpu
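Importing these modules runs their @op_info_register decorators, which is what makes the TensorArray CPU op information available to the kernel factory at load time; this mirrors the existing buffer_* registrations above.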

View File

@@ -0,0 +1,29 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""TensorArrayClear op"""
from mindspore.ops.op_info_register import op_info_register, CpuRegOp, DataType
tensor_array_clear_op_info = CpuRegOp("TensorArrayClear") \
.input(0, "handle", "required") \
.output(0, "y", "required") \
.dtype_format(DataType.I64_Default, DataType.I64_Default) \
.get_op_info()
@op_info_register(tensor_array_clear_op_info)
def _tensor_array_clear_cpu():
"""TensorArrayClear cpu register"""
return

View File

@@ -0,0 +1,29 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""TensorArrayClose op"""
from mindspore.ops.op_info_register import op_info_register, CpuRegOp, DataType
tensor_array_close_op_info = CpuRegOp("TensorArrayClose") \
.input(0, "handle", "required") \
.output(0, "y", "required") \
.dtype_format(DataType.I64_Default, DataType.I64_Default) \
.get_op_info()
@op_info_register(tensor_array_close_op_info)
def _tensor_array_close_cpu():
"""TensorArrayClose cpu register"""
return

View File

@@ -0,0 +1,28 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""TensorArrayCreate op"""
from mindspore.ops.op_info_register import op_info_register, CpuRegOp, DataType
tensor_array_create_op_info = CpuRegOp("TensorArray") \
.output(0, "handle", "required") \
.dtype_format(DataType.I64_Default) \
.get_op_info()
@op_info_register(tensor_array_create_op_info)
def _tensor_array_create_cpu():
"""TensorArrayCreate cpu register"""
return

View File

@@ -0,0 +1,48 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""TensorArrayRead op"""
from mindspore.ops.op_info_register import op_info_register, CpuRegOp, DataType
tensor_array_read_op_info = CpuRegOp("TensorArrayRead") \
.input(0, "handle", "required") \
.input(1, "index", "required") \
.output(0, "y", "required") \
.dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.I64_Default) \
.dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.I32_Default) \
.dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.I16_Default) \
.dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.U32_Default) \
.dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.U16_Default) \
.dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.U8_Default) \
.dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.U64_Default) \
.dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.F16_Default) \
.dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.F32_Default) \
.dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.BOOL_Default) \
.dtype_format(DataType.I32_Default, DataType.I64_Default, DataType.I64_Default) \
.dtype_format(DataType.I32_Default, DataType.I64_Default, DataType.I32_Default) \
.dtype_format(DataType.I32_Default, DataType.I64_Default, DataType.I16_Default) \
.dtype_format(DataType.I32_Default, DataType.I64_Default, DataType.U32_Default) \
.dtype_format(DataType.I32_Default, DataType.I64_Default, DataType.U16_Default) \
.dtype_format(DataType.I32_Default, DataType.I64_Default, DataType.U8_Default) \
.dtype_format(DataType.I32_Default, DataType.I64_Default, DataType.U64_Default) \
.dtype_format(DataType.I32_Default, DataType.I64_Default, DataType.F16_Default) \
.dtype_format(DataType.I32_Default, DataType.I64_Default, DataType.F32_Default) \
.dtype_format(DataType.I32_Default, DataType.I64_Default, DataType.BOOL_Default) \
.get_op_info()
@op_info_register(tensor_array_read_op_info)
def _tensor_array_read_cpu():
"""TensorArrayRead cpu register"""
return

View File

@@ -0,0 +1,28 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""TensorArraySize op"""
from mindspore.ops.op_info_register import op_info_register, CpuRegOp, DataType
tensor_array_size_op_info = CpuRegOp("TensorArraySize") \
.input(0, "handle", "required") \
.output(0, "y", "required") \
.dtype_format(DataType.I64_Default, DataType.I64_Default) \
.get_op_info()
@op_info_register(tensor_array_size_op_info)
def _tensor_array_size_cpu():
"""TensorArraySize cpu register"""
return

View File

@@ -0,0 +1,37 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""TensorArrayStack op"""
from mindspore.ops.op_info_register import op_info_register, CpuRegOp, DataType

tensor_array_stack_op_info = CpuRegOp("TensorArrayStack") \
    .input(0, "handle", "required") \
    .output(0, "y", "required") \
    .dtype_format(DataType.I64_Default, DataType.I64_Default) \
    .dtype_format(DataType.I64_Default, DataType.I32_Default) \
    .dtype_format(DataType.I64_Default, DataType.I16_Default) \
    .dtype_format(DataType.I64_Default, DataType.U32_Default) \
    .dtype_format(DataType.I64_Default, DataType.U16_Default) \
    .dtype_format(DataType.I64_Default, DataType.U8_Default) \
    .dtype_format(DataType.I64_Default, DataType.U64_Default) \
    .dtype_format(DataType.I64_Default, DataType.F16_Default) \
    .dtype_format(DataType.I64_Default, DataType.F32_Default) \
    .dtype_format(DataType.I64_Default, DataType.BOOL_Default) \
    .get_op_info()


@op_info_register(tensor_array_stack_op_info)
def _tensor_array_stack_cpu():
    """TensorArrayStack cpu register"""
    return
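Each row above maps the int64 handle to one supported element dtype for the stacked output. A minimal sketch (dtype and values illustrative):

import mindspore
import mindspore.context as context
import mindspore.nn as nn

# Sketch only: dtype and values are illustrative.
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
ta = nn.TensorArray(mindspore.float32, ())
for i in range(3):
    ta.write(i, float(i))
s = ta.stack()   # dispatches to TensorArrayStack; expected output: [0., 1., 2.]
print(s)
ta.close()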

View File

@ -0,0 +1,49 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""TensorArrayWrite op"""
from mindspore.ops.op_info_register import op_info_register, CpuRegOp, DataType

tensor_array_write_op_info = CpuRegOp("TensorArrayWrite") \
    .input(0, "handle", "required") \
    .input(1, "index", "required") \
    .input(2, "value", "required") \
    .output(0, "y", "required") \
    .dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.I64_Default, DataType.I64_Default) \
    .dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.I32_Default, DataType.I64_Default) \
    .dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.I16_Default, DataType.I64_Default) \
    .dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.U32_Default, DataType.I64_Default) \
    .dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.U16_Default, DataType.I64_Default) \
    .dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.U8_Default, DataType.I64_Default) \
    .dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.U64_Default, DataType.I64_Default) \
    .dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.F16_Default, DataType.I64_Default) \
    .dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.F32_Default, DataType.I64_Default) \
    .dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.BOOL_Default, DataType.I64_Default) \
    .dtype_format(DataType.I64_Default, DataType.I32_Default, DataType.I64_Default, DataType.I64_Default) \
    .dtype_format(DataType.I64_Default, DataType.I32_Default, DataType.I32_Default, DataType.I64_Default) \
    .dtype_format(DataType.I64_Default, DataType.I32_Default, DataType.I16_Default, DataType.I64_Default) \
    .dtype_format(DataType.I64_Default, DataType.I32_Default, DataType.U32_Default, DataType.I64_Default) \
    .dtype_format(DataType.I64_Default, DataType.I32_Default, DataType.U16_Default, DataType.I64_Default) \
    .dtype_format(DataType.I64_Default, DataType.I32_Default, DataType.U8_Default, DataType.I64_Default) \
    .dtype_format(DataType.I64_Default, DataType.I32_Default, DataType.U64_Default, DataType.I64_Default) \
    .dtype_format(DataType.I64_Default, DataType.I32_Default, DataType.F16_Default, DataType.I64_Default) \
    .dtype_format(DataType.I64_Default, DataType.I32_Default, DataType.F32_Default, DataType.I64_Default) \
    .dtype_format(DataType.I64_Default, DataType.I32_Default, DataType.BOOL_Default, DataType.I64_Default) \
    .get_op_info()


@op_info_register(tensor_array_write_op_info)
def _tensor_array_write_cpu():
    """TensorArrayWrite cpu register"""
    return
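Each row in the write table pairs the int64 handle, an int64 or int32 index, one of the ten supported value dtypes, and an int64 output. A minimal sketch of a sparse write; per the test at the end of this change, unwritten slots read back as zero (dtype and values illustrative):

import mindspore
import mindspore.context as context
import mindspore.nn as nn

# Sketch only: dtype and values are illustrative.
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
ta = nn.TensorArray(mindspore.float32, ())
ta.write(2, 1.0)    # dispatches to TensorArrayWrite; slots 0 and 1 stay zero-filled
print(ta.stack())   # expected output: [0., 0., 1.]
ta.close()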

View File

@ -0,0 +1,86 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor


class TensorArrayNet(nn.Cell):
    def __init__(self, dtype, element_shape):
        super(TensorArrayNet, self).__init__()
        self.ta = nn.TensorArray(dtype, element_shape)

    def construct(self, index, value):
        self.ta.write(index, value)
        v = self.ta.read(index)
        s = self.ta.stack()
        self.ta.close()
        return v, s


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_tensorarray():
"""
Feature: TensorArray gpu TEST.
Description: Test the function write, read, stack, clear, close in both graph and pynative mode.
Expectation: success.
"""
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    index = Tensor(0, mindspore.int64)
    value = Tensor(5, mindspore.int64)
    ta = TensorArrayNet(dtype=mindspore.int64, element_shape=())
    v, s = ta(index, value)
    expect_v = 5
    expect_s = [5]
    assert np.allclose(s.asnumpy(), expect_s)
    assert np.allclose(v.asnumpy(), expect_v)
    context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
    ta = nn.TensorArray(mindspore.int64, ())
    for i in range(5):
        ta.write(i, 99)
    v = ta.read(0)
    s = ta.stack()
    expect_v = 99
    expect_s = [99, 99, 99, 99, 99]
    assert np.allclose(s.asnumpy(), expect_s)
    assert np.allclose(v.asnumpy(), expect_v)
    ta_size = ta.size()
    assert np.allclose(ta_size.asnumpy(), 5)
    ta.clear()
    ta_size = ta.size()
    assert np.allclose(ta_size.asnumpy(), 0)
    ta.write(0, 88)
    v = ta.read(0)
    s = ta.stack()
    ta.close()
    expect_v = 88
    expect_s = [88]
    assert np.allclose(s.asnumpy(), expect_s)
    assert np.allclose(v.asnumpy(), expect_v)
    ta = nn.TensorArray(mindspore.float32, ())
    ta.write(5, 1.)
    s = ta.stack()
    expect_s = [0., 0., 0., 0., 0., 1.]
    assert np.allclose(s.asnumpy(), expect_s)
    ta.write(2, 1.)
    s = ta.stack()
    expect_s = [0., 0., 1., 0., 0., 1.]
    assert np.allclose(s.asnumpy(), expect_s)
    ta.close()
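The registration tables above also list dtypes this test does not touch directly, float16 among them. A hedged spot-check along the same lines, not part of the committed test (values illustrative):

import numpy as np
import mindspore
import mindspore.context as context
import mindspore.nn as nn

# Sketch only: an F16 round trip mirroring the committed cases above.
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
ta = nn.TensorArray(mindspore.float16, ())
ta.write(0, 0.5)
assert np.allclose(ta.read(0).asnumpy(), 0.5)
assert np.allclose(ta.stack().asnumpy(), [0.5])
ta.close()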