From 0be70ca2dd52a1504c3ce5b3d44e0bbdb6c738bd Mon Sep 17 00:00:00 2001
From: VectorSL
Date: Tue, 14 Dec 2021 16:21:08 +0800
Subject: [PATCH] fix codex

---
 .../kernel_compiler/cpu/multinomial_cpu_kernel.cc |  2 +-
 .../cpu/rl/tensor_array_read_kernel.cc            |  7 ++++---
 .../cpu/rl/tensor_array_stack_kernel.cc           | 11 +++++++----
 .../cpu/rl/tensor_array_stack_kernel.h            |  4 ++--
 .../cpu/rl/tensor_array_write_kernel.cc           |  8 ++++----
 .../gpu/rl/tensor_array_write_kernel.cc           |  2 +-
 mindspore/ccsrc/runtime/device/tensor_array.cc    |  2 +-
 mindspore/ccsrc/runtime/device/tensor_array.h     |  2 +-
 mindspore/core/abstract/prim_rl.cc                |  8 ++++----
 mindspore/nn/reinforcement/tensor_array.py        |  1 +
 mindspore/ops/operations/_tensor_array.py         |  7 +++++++
 11 files changed, 33 insertions(+), 21 deletions(-)

diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/multinomial_cpu_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/cpu/multinomial_cpu_kernel.cc
index 16ae17b1691..45287d41d58 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/cpu/multinomial_cpu_kernel.cc
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/multinomial_cpu_kernel.cc
@@ -43,7 +43,7 @@ void MultinomialCpuKernel::InitKernel(const CNodePtr &kernel_node) {
     std::random_device rd;
     RNG_seed = static_cast<int64_t>(rd());
   }
-  rng_.seed(RNG_seed);
+  rng_.seed(LongToUlong(RNG_seed));
 }
 
 bool MultinomialCpuKernel::Launch(const std::vector<AddressPtr> &inputs,
diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_read_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_read_kernel.cc
index e7f8f5081fd..0065d6ed07c 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_read_kernel.cc
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_read_kernel.cc
@@ -37,7 +37,7 @@ void TensorArrayCPUReadKernel::InitKernel(const CNodePtr &kernel_node) {
   type_ = AnfAlgo::GetNodeAttr<TypePtr>(kernel_node, "dtype");
   value_size_ = GetTypeByte(type_);
   for (auto i : shapes_) {
-    value_size_ *= i;
+    value_size_ *= LongToSize(i);
   }
   input_size_list_.push_back(sizeof(int64_t));
   input_size_list_.push_back(sizeof(int64_t));
@@ -60,9 +60,10 @@ bool TensorArrayCPUReadKernel::Launch(const std::vector<AddressPtr> &inputs, con
   }
   auto value_addr = tensors_->Read(index_host);
   MS_LOG(DEBUG) << "Read value index:" << index_host;
-  auto ret = memcpy_s(out_value, value_size_, value_addr->addr, value_size_);
+  auto out_value_size = value_size_;
+  auto ret = memcpy_s(out_value, out_value_size, value_addr->addr, value_size_);
   if (ret != EOK) {
-    MS_LOG(EXCEPTION) << "Memcpy failed.";
+    MS_LOG(EXCEPTION) << "Memcpy failed, errorno(" << ret << ")";
   }
   return true;
 }
diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_stack_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_stack_kernel.cc
index 3983e75d2f4..23de73ffb86 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_stack_kernel.cc
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_stack_kernel.cc
@@ -45,7 +45,7 @@ void TensorArrayCPUStackKernel::InitKernel(const CNodePtr &kernel_node) {
   type_ = AnfAlgo::GetNodeAttr<TypePtr>(kernel_node, "dtype");
   ele_size_ = GetTypeByte(type_);
   for (auto i : shapes_) {
-    ele_size_ *= i;
+    ele_size_ *= LongToSize(i);
   }
   value_size_ = ele_size_ * LongToSize(max_element);
   output_size_list_.push_back(value_size_);
@@ -57,7 +57,7 @@ void TensorArrayCPUStackKernel::PostExecute() {
   MS_EXCEPTION_IF_NULL(tensors_);
   size_t tensor_size = tensors_->GetValidSize();
   auto shape = shapes_;
-  shape.insert(shape.begin(), tensor_size);
+  (void)shape.insert(shape.begin(), tensor_size);
   MS_LOG(DEBUG) << "After postexecute, the real shape of TensorArrayStack is " << shape;
   AnfAlgo::SetOutputInferTypeAndShape({type_->type_id()}, {shape}, kernel_node_.lock().get());
 }
@@ -85,9 +85,12 @@ bool TensorArrayCPUStackKernel::Launch(const std::vector<AddressPtr> &inputs, co
     MS_LOG(EXCEPTION) << "Invalid TensorArray size, maybe should Clear() TensorArray before next usage.";
   }
   for (size_t i = 0; i < tensors_->GetValidSize(); i++) {
-    auto ret = memcpy_s(out_value + ele_size_ * i, ele_size_, tensors_->GetTensorAddr(i), ele_size_);
+    auto out_ele_size = ele_size_;
+    auto src_addr = tensors_->GetTensorAddr(i);
+    MS_EXCEPTION_IF_NULL(src_addr);
+    auto ret = memcpy_s(out_value + ele_size_ * i, out_ele_size, src_addr, ele_size_);
     if (ret != EOK) {
-      MS_LOG(EXCEPTION) << "Memcpy failed.";
+      MS_LOG(EXCEPTION) << "Memcpy failed, errorno(" << ret << ")";
     }
   }
   PostExecute();
diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_stack_kernel.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_stack_kernel.h
index b81d5573a78..6445f9016ae 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_stack_kernel.h
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_stack_kernel.h
@@ -44,8 +44,8 @@ class TensorArrayCPUStackKernel : public CPUKernel {
  private:
   CNodeWeakPtr kernel_node_;
   int64_t handle_;
-  int64_t value_size_;
-  int64_t ele_size_;
+  size_t value_size_;
+  size_t ele_size_;
   std::vector<int64_t> shapes_;
   TypePtr type_;
   std::vector<size_t> input_size_list_;
diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_write_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_write_kernel.cc
index 6847f022ea5..54f20720aee 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_write_kernel.cc
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/rl/tensor_array_write_kernel.cc
@@ -25,7 +25,7 @@
 constexpr size_t kSecondInputIndex = 2;
 using mindspore::device::TensorArrayMgr;
 using mindspore::device::cpu::CPUTensorArray;
 using mindspore::device::cpu::CPUTensorArrayPtr;
-TensorArrayCPUWriteKernel::TensorArrayCPUWriteKernel() : value_size_(0) {}
+TensorArrayCPUWriteKernel::TensorArrayCPUWriteKernel() : value_size_(0), type_(kTypeUnknown) {}
 
 const std::vector<size_t> &TensorArrayCPUWriteKernel::GetInputSizeList() const { return input_size_list_; }
@@ -47,7 +47,7 @@ void TensorArrayCPUWriteKernel::InitKernel(const CNodePtr &kernel_node) {
   output_size_list_.push_back(sizeof(int64_t));
 }
 
-bool TensorArrayCPUWriteKernel::Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &outputs,
+bool TensorArrayCPUWriteKernel::Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
                                        const std::vector<AddressPtr> &) {
   auto handle_addr = GetDeviceAddress<int64_t>(inputs, 0);
   auto index = GetDeviceAddress<int64_t>(inputs, 1);
@@ -73,9 +73,9 @@ bool TensorArrayCPUWriteKernel::Launch(const std::vector<AddressPtr> &inputs, co
   }
   MS_EXCEPTION_IF_NULL(dev_addr->addr);
   dev_addr->size = value_size_;
-  auto ret = memcpy_s(dev_addr->addr, value_size_, value, value_size_);
+  auto ret = memcpy_s(dev_addr->addr, dev_addr->size, value, value_size_);
   if (ret != EOK) {
-    MS_LOG(EXCEPTION) << "Memcpy failed.";
+    MS_LOG(EXCEPTION) << "Memcpy failed, errorno(" << ret << ")";
   }
   if (tensors_->Write(index_host, dev_addr)) {
     MS_LOG(DEBUG) << "Write to tensorarry succeed, index " << index_host;
diff --git a/mindspore/ccsrc/backend/kernel_compiler/gpu/rl/tensor_array_write_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/gpu/rl/tensor_array_write_kernel.cc
index e03de3a83c5..314fcc47de5 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/gpu/rl/tensor_array_write_kernel.cc
+++ b/mindspore/ccsrc/backend/kernel_compiler/gpu/rl/tensor_array_write_kernel.cc
@@ -52,7 +52,7 @@ void TensorArrayWriteKernel::InitSizeLists() {
   output_size_list_.push_back(sizeof(int64_t));
 }
 
-bool TensorArrayWriteKernel::Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &outputs,
+bool TensorArrayWriteKernel::Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
                                     const std::vector<AddressPtr> &, void *stream) {
   auto handle_addr = GetDeviceAddress<int64_t>(inputs, 0);
   auto index = GetDeviceAddress<int64_t>(inputs, 1);
diff --git a/mindspore/ccsrc/runtime/device/tensor_array.cc b/mindspore/ccsrc/runtime/device/tensor_array.cc
index 64ad28ac356..3ce38dbd76f 100644
--- a/mindspore/ccsrc/runtime/device/tensor_array.cc
+++ b/mindspore/ccsrc/runtime/device/tensor_array.cc
@@ -107,7 +107,7 @@ void TensorArray::Free() {
 size_t TensorArray::GetValidSize() const { return valid_size_; }
 size_t TensorArray::GetRealSize() const { return tensors_.size(); }
 
-void *TensorArray::GetTensorAddr(const size_t &index) const { return tensors_[index]->addr; }
+const void *TensorArray::GetTensorAddr(const size_t &index) const { return tensors_[index]->addr; }
 
 void TensorArray::SetMaxSize(const int64_t size, const bool is_dynamic) {
   is_dynamic_ = is_dynamic;
diff --git a/mindspore/ccsrc/runtime/device/tensor_array.h b/mindspore/ccsrc/runtime/device/tensor_array.h
index 4e6e6aebee1..825e2465850 100644
--- a/mindspore/ccsrc/runtime/device/tensor_array.h
+++ b/mindspore/ccsrc/runtime/device/tensor_array.h
@@ -70,7 +70,7 @@ class TensorArray {
   virtual void SetMaxSize(const int64_t size, const bool is_dynamic);
 
   // Return the tensor address in position index.
-  virtual void *GetTensorAddr(const size_t &index) const;
+  virtual const void *GetTensorAddr(const size_t &index) const;
 
  protected:
   std::string name_;
diff --git a/mindspore/core/abstract/prim_rl.cc b/mindspore/core/abstract/prim_rl.cc
index 605d0ca8c18..b9352675330 100644
--- a/mindspore/core/abstract/prim_rl.cc
+++ b/mindspore/core/abstract/prim_rl.cc
@@ -31,7 +31,7 @@ namespace mindspore {
 namespace abstract {
 constexpr int64_t kMaxElement = 10000;
 AbstractBasePtr InferImplTensorArrayStack(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
-                                          const AbstractBasePtrList &args_spec_list) {
+                                          const AbstractBasePtrList &) {
   // Infer TensorArrayStack
   const std::string op_name = primitive->name();
   auto attr_shape = primitive->GetAttr("element_shape");
@@ -48,9 +48,9 @@ AbstractBasePtr InferImplTensorArrayStack(const AnalysisEnginePtr &, const Primi
   auto max_shape_ = ele_shape;
   auto min_shape_ = ele_shape;
   auto out_shape_ = ele_shape;
-  max_shape_.insert(max_shape_.begin(), kMaxElement);
-  min_shape_.insert(min_shape_.begin(), 1);
-  out_shape_.insert(out_shape_.begin(), -1);
+  (void)max_shape_.insert(max_shape_.begin(), kMaxElement);
+  (void)min_shape_.insert(min_shape_.begin(), 1);
+  (void)out_shape_.insert(out_shape_.begin(), -1);
   ShapeVector out_shape = out_shape_;
   ShapeVector min_shape = min_shape_;
   ShapeVector max_shape = max_shape_;
diff --git a/mindspore/nn/reinforcement/tensor_array.py b/mindspore/nn/reinforcement/tensor_array.py
index 0b837a28d5e..3277449c1ba 100644
--- a/mindspore/nn/reinforcement/tensor_array.py
+++ b/mindspore/nn/reinforcement/tensor_array.py
@@ -20,6 +20,7 @@ from mindspore.ops.operations import _tensor_array as ta
 from mindspore._checkparam import Rel, Validator
 from mindspore.common import dtype as mstype
 
+
 class TensorArray(Cell):
r"""TensorArray: a dynamic array to store tensors. diff --git a/mindspore/ops/operations/_tensor_array.py b/mindspore/ops/operations/_tensor_array.py index 8cc7aa60224..db01c259d2d 100644 --- a/mindspore/ops/operations/_tensor_array.py +++ b/mindspore/ops/operations/_tensor_array.py @@ -21,6 +21,7 @@ from ..._checkparam import Rel from ...common import dtype as mstype from ..primitive import prim_attr_register, PrimitiveWithInfer, Primitive + class TensorArray(PrimitiveWithInfer): r""" TensorArrayCreate used to create a TensorArray and return an unique handle. @@ -66,6 +67,7 @@ class TensorArray(PrimitiveWithInfer): def infer_dtype(self): return mstype.int64 + class TensorArrayWrite(PrimitiveWithInfer): r""" TensorArrayWrite used to write tensor into a created TensorArray. @@ -102,6 +104,7 @@ class TensorArrayWrite(PrimitiveWithInfer): validator.check_type_name("value", value_type, mstype.number_type + (mstype.bool_,), self.name) return mstype.int64 + class TensorArrayRead(PrimitiveWithInfer): r""" TensorArrayRead used to read tensor from a created TensorArray by the given index. @@ -149,6 +152,7 @@ class TensorArrayRead(PrimitiveWithInfer): validator.check_type_name("index", index_type, (int, ms.int64), self.name) return self.dtype + class TensorArrayClose(PrimitiveWithInfer): r""" TensorArrayClose used to close the created TensorArray. The resources in TensorArray will be deleted. @@ -181,6 +185,7 @@ class TensorArrayClose(PrimitiveWithInfer): validator.check_type_name("handle", handle_type, (ms.int64), self.name) return mstype.int64 + class TensorArrayClear(PrimitiveWithInfer): r""" TensorArrayClear used to reset the created TensorArray. The instance of TensorArray is still aviliable. @@ -213,6 +218,7 @@ class TensorArrayClear(PrimitiveWithInfer): validator.check_type_name("handle", handle_type, (ms.int64), self.name) return mstype.int64 + class TensorArrayStack(Primitive): r""" TensorArrayStack used to stack the tensors in a created TensorArray into one tensor. @@ -252,6 +258,7 @@ class TensorArrayStack(Primitive): self.add_prim_attr('is_dynamic_shape', True) self.add_prim_attr('side_effect_mem', True) + class TensorArraySize(PrimitiveWithInfer): r""" TensorArraySize used to get the logical size of the created TensorArray.