From 6760d9976d9584bdf48c4ae8bb023dbf04c67300 Mon Sep 17 00:00:00 2001
From: WilliamLian
Date: Thu, 13 Aug 2020 16:42:02 +0800
Subject: [PATCH] add reshape type to tensor

Move the unscoped enum Axis out of namespace kernel (kernel.h) into the
top-level mindspore namespace (ir/kernel_info_dev.h) so that ir/tensor.h can
use it, and give Tensor a padding_type_ field recording the reshape (padding)
type of the node that produced it. Session code now stamps this field on
output tensors and feeds it back into the kernel build info when constructing
run-op parameters. The test class C in signal_test.cc is renamed to Ca to
avoid colliding with the new mindspore::C enumerator.

---
 mindspore/ccsrc/backend/kernel_compiler/kernel.h |  8 --------
 .../backend/kernel_compiler/kernel_build_info.h  |  1 +
 .../tbe/tbe_kernel_select/tbe_kernel_select.cc   |  8 ++++----
 .../backend/optimizer/ascend/ascend_helper.cc    |  4 ++--
 .../backend/optimizer/ascend/ascend_helper.h     |  2 +-
 .../backend/session/anf_runtime_algorithm.cc     |  6 +++---
 .../backend/session/anf_runtime_algorithm.h      |  7 ++++---
 mindspore/ccsrc/backend/session/session_basic.cc |  5 ++++-
 mindspore/ccsrc/common/trans.cc                  |  2 +-
 mindspore/ccsrc/common/trans.h                   |  3 +--
 mindspore/core/ir/kernel_info_dev.h              |  6 ++++++
 mindspore/core/ir/tensor.cc                      |  7 +++++--
 mindspore/core/ir/tensor.h                       |  3 +++
 tests/ut/cpp/utils/signal_test.cc                | 16 ++++++++--------
 14 files changed, 43 insertions(+), 35 deletions(-)

diff --git a/mindspore/ccsrc/backend/kernel_compiler/kernel.h b/mindspore/ccsrc/backend/kernel_compiler/kernel.h
index 5725bc80aee..01f8e75f49a 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/kernel.h
+++ b/mindspore/ccsrc/backend/kernel_compiler/kernel.h
@@ -30,14 +30,6 @@ namespace mindspore {
 enum KernelType : int { UNKNOWN_KERNEL_TYPE = 0, AKG_KERNEL, AICPU_KERNEL, RT_KERNEL, HCCL_KERNEL, TBE_KERNEL };
 namespace kernel {
-
-enum Axis : int {
-  N = 0,
-  C,
-  H,
-  W,
-};
-
 // Supported fusion type
 enum FusionType {
   CONVLUTION = 0,
diff --git a/mindspore/ccsrc/backend/kernel_compiler/kernel_build_info.h b/mindspore/ccsrc/backend/kernel_compiler/kernel_build_info.h
index f45a1b48876..71d9825d5a7 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/kernel_build_info.h
+++ b/mindspore/ccsrc/backend/kernel_compiler/kernel_build_info.h
@@ -22,6 +22,7 @@
 #include <string>
 #include <vector>
 #include "ir/dtype.h"
+#include "ir/kernel_info_dev.h"
 #include "backend/kernel_compiler/kernel.h"

 namespace mindspore {
diff --git a/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_select/tbe_kernel_select.cc b/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_select/tbe_kernel_select.cc
index 5635811425c..ec176e324d2 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_select/tbe_kernel_select.cc
+++ b/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_select/tbe_kernel_select.cc
@@ -406,16 +406,16 @@ void TbeKernelSelect::StringToAxisVector(const std::string &reshape_type_str, st
   for (const auto &c : reshape_type_str) {
     switch (c) {
       case 'N':
-        reshape_type_vec->push_back(kernel::N);
+        reshape_type_vec->push_back(N);
         break;
       case 'C':
-        reshape_type_vec->push_back(kernel::C);
+        reshape_type_vec->push_back(C);
         break;
       case 'H':
-        reshape_type_vec->push_back(kernel::H);
+        reshape_type_vec->push_back(H);
         break;
       case 'W':
-        reshape_type_vec->push_back(kernel::W);
+        reshape_type_vec->push_back(W);
         break;
       default:
         MS_LOG(EXCEPTION) << "Unknown axis " << c << "in reshape type.";
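The hunk above is the call-site effect of the enum move: the reshape-type string is parsed character by character into the now-unqualified axis enumerators. A standalone sketch of that parsing logic (a free function, with std::runtime_error standing in for MS_LOG(EXCEPTION); not the TbeKernelSelect member itself):

    #include <stdexcept>
    #include <string>
    #include <vector>

    enum Axis : int { N = 0, C, H, W };

    // Parse a reshape-type string, e.g. "NC" -> {N, C}; reject anything outside NCHW.
    std::vector<Axis> StringToAxisVector(const std::string &reshape_type_str) {
      std::vector<Axis> axes;
      for (const char c : reshape_type_str) {
        switch (c) {
          case 'N': axes.push_back(N); break;
          case 'C': axes.push_back(C); break;
          case 'H': axes.push_back(H); break;
          case 'W': axes.push_back(W); break;
          default:
            // Stand-in for MS_LOG(EXCEPTION) in the real code.
            throw std::runtime_error(std::string("Unknown axis ") + c + " in reshape type.");
        }
      }
      return axes;
    }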
diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ascend_helper.cc b/mindspore/ccsrc/backend/optimizer/ascend/ascend_helper.cc
index d37a31b95af..1c96b417bb4 100644
--- a/mindspore/ccsrc/backend/optimizer/ascend/ascend_helper.cc
+++ b/mindspore/ccsrc/backend/optimizer/ascend/ascend_helper.cc
@@ -55,7 +55,7 @@ AnfNodePtr AddTransOpNodeToGraph(const FuncGraphPtr &func_graph, const AnfNodePt
   CNodePtr trans_data = nullptr;
   std::string input_format = is_insert_input ? kOpFormat_DEFAULT : AnfAlgo::GetOutputFormat(node, 0);
   std::string dst_format = is_insert_input ? AnfAlgo::GetInputFormat(node, 0) : kOpFormat_DEFAULT;
-  std::vector<kernel::Axis> padding_axis;
+  std::vector<Axis> padding_axis;
   MS_EXCEPTION_IF_NULL(node);
   // if insert transdata for input we need to change the input
   if (is_insert_input) {
@@ -170,7 +170,7 @@ AnfNodePtr InsertTransOpForMultipleOutput(const FuncGraphPtr &func_graph, const
   }
 }  // namespace
 void RefreshKernelBuildInfo(const std::string &input_format, const std::string &output_format,
-                            const AnfNodePtr &trans_data, const std::vector<kernel::Axis> &reshape_type,
+                            const AnfNodePtr &trans_data, const std::vector<Axis> &reshape_type,
                             const TypeId &type_id) {
   MS_EXCEPTION_IF_NULL(trans_data);
   auto ori_build_info = AnfAlgo::GetSelectKernelBuildInfo(trans_data);
diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ascend_helper.h b/mindspore/ccsrc/backend/optimizer/ascend/ascend_helper.h
index 4d2833b999b..b7a86efa4ce 100644
--- a/mindspore/ccsrc/backend/optimizer/ascend/ascend_helper.h
+++ b/mindspore/ccsrc/backend/optimizer/ascend/ascend_helper.h
@@ -86,7 +86,7 @@ class OpFinder {
 using OpFinderPtr = std::shared_ptr<OpFinder>;

 void RefreshKernelBuildInfo(const std::string &input_format, const std::string &output_format,
-                            const AnfNodePtr &trans_data, const std::vector<kernel::Axis> &reshape_type = {},
+                            const AnfNodePtr &trans_data, const std::vector<Axis> &reshape_type = {},
                             const TypeId &type_id = kTypeUnknown);

 CNodePtr NewTransOpNode(const FuncGraphPtr &func_graph, const AnfNodePtr &input, const KernelSelectPtr &kernel_select,
diff --git a/mindspore/ccsrc/backend/session/anf_runtime_algorithm.cc b/mindspore/ccsrc/backend/session/anf_runtime_algorithm.cc
index 8a212be1f02..06c1219e93d 100644
--- a/mindspore/ccsrc/backend/session/anf_runtime_algorithm.cc
+++ b/mindspore/ccsrc/backend/session/anf_runtime_algorithm.cc
@@ -418,7 +418,7 @@ std::string AnfRuntimeAlgorithm::GetPrevNodeOutputFormat(const AnfNodePtr &anf_n
   return AnfRuntimeAlgorithm::GetOutputFormat(kernel_with_index.first, kernel_with_index.second);
 }

-std::vector<kernel::Axis> AnfRuntimeAlgorithm::GetPrevNodeOutputReshapeType(const AnfNodePtr &node, size_t input_idx) {
+std::vector<Axis> AnfRuntimeAlgorithm::GetPrevNodeOutputReshapeType(const AnfNodePtr &node, size_t input_idx) {
   KernelWithIndex kernel_with_index = AnfAlgo::GetPrevNodeOutput(node, input_idx);
   return GetOutputReshapeType(kernel_with_index.first, kernel_with_index.second);
 }
@@ -483,7 +483,7 @@ std::vector<size_t> AnfRuntimeAlgorithm::GetInputDeviceShape(const AnfNodePtr &n
   return trans::TransShapeToDevice(infer_shape, format);
 }

-std::vector<kernel::Axis> AnfRuntimeAlgorithm::GetInputReshapeType(const AnfNodePtr &node, size_t input_idx) {
+std::vector<Axis> AnfRuntimeAlgorithm::GetInputReshapeType(const AnfNodePtr &node, size_t input_idx) {
   MS_EXCEPTION_IF_NULL(node);
   if (input_idx > GetInputTensorNum(node)) {
     MS_LOG(EXCEPTION) << "The index:" << input_idx
@@ -503,7 +503,7 @@ std::vector<kernel::Axis> AnfRuntimeAlgorithm::GetInputReshapeType(const AnfNode
   return build_info->GetInputReshapeType(input_idx);
 }

-std::vector<kernel::Axis> AnfRuntimeAlgorithm::GetOutputReshapeType(const AnfNodePtr &node, size_t output_idx) {
+std::vector<Axis> AnfRuntimeAlgorithm::GetOutputReshapeType(const AnfNodePtr &node, size_t output_idx) {
   MS_EXCEPTION_IF_NULL(node);
   if (output_idx > GetOutputTensorNum(node)) {
     MS_LOG(EXCEPTION) << "The index [" << output_idx << "] is out of range of the node's output size [ "
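The reshape-type getters above all follow the same guard-then-delegate shape: validate the index against the node's input/output count, then ask the selected kernel build info. A reduced standalone sketch of that pattern (KernelBuildInfoStub is a hypothetical stand-in for kernel::KernelBuildInfo, and std::out_of_range replaces MS_LOG(EXCEPTION)):

    #include <cstddef>
    #include <stdexcept>
    #include <vector>

    enum Axis : int { N = 0, C, H, W };

    // Reduced stand-in for kernel::KernelBuildInfo: one reshape type per input.
    struct KernelBuildInfoStub {
      std::vector<std::vector<Axis>> input_reshape_types;
      std::vector<Axis> GetInputReshapeType(std::size_t idx) const { return input_reshape_types.at(idx); }
    };

    // Mirrors the guard-then-delegate structure of AnfRuntimeAlgorithm::GetInputReshapeType.
    std::vector<Axis> GetInputReshapeType(const KernelBuildInfoStub &build_info, std::size_t input_idx,
                                          std::size_t input_num) {
      if (input_idx > input_num) {
        // Stand-in for MS_LOG(EXCEPTION) in the real code.
        throw std::out_of_range("The index is out of range of the node's input size.");
      }
      return build_info.GetInputReshapeType(input_idx);
    }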
diff --git a/mindspore/ccsrc/backend/session/anf_runtime_algorithm.h b/mindspore/ccsrc/backend/session/anf_runtime_algorithm.h
index 89a5ecbce39..dfd3fede50a 100644
--- a/mindspore/ccsrc/backend/session/anf_runtime_algorithm.h
+++ b/mindspore/ccsrc/backend/session/anf_runtime_algorithm.h
@@ -27,6 +27,7 @@
 #include "ir/dtype.h"
 #include "base/base.h"
 #include "ir/primitive.h"
+#include "ir/kernel_info_dev.h"
 #include "runtime/device/device_address.h"
 #include "backend/kernel_compiler/kernel.h"
 #include "backend/kernel_compiler/kernel_build_info.h"
@@ -109,7 +110,7 @@ class AnfRuntimeAlgorithm {
   // get output format from prev node,input_index is the input index of current node related to prev node
   static std::string GetPrevNodeOutputFormat(const AnfNodePtr &node, size_t input_idx);
   // get reshape_type of from the output of input node.
-  static std::vector<kernel::Axis> GetPrevNodeOutputReshapeType(const AnfNodePtr &node, size_t input_idx);
+  static std::vector<Axis> GetPrevNodeOutputReshapeType(const AnfNodePtr &node, size_t input_idx);
   // get output shapes inferred by ME from input nodes.
   static std::vector<size_t> GetOutputInferShape(const AnfNodePtr &node, size_t output_idx);
   // get input shapes inferred by ME from input nodes.
@@ -119,9 +120,9 @@ class AnfRuntimeAlgorithm {
   // get input shapes which will built and run in device
   static std::vector<size_t> GetInputDeviceShape(const AnfNodePtr &node, size_t input_idx);
   // Get Input Padding Axis
-  static std::vector<kernel::Axis> GetInputReshapeType(const AnfNodePtr &node, size_t output_idx);
+  static std::vector<Axis> GetInputReshapeType(const AnfNodePtr &node, size_t output_idx);
   // Get Output Padding Axis
-  static std::vector<kernel::Axis> GetOutputReshapeType(const AnfNodePtr &node, size_t output_idx);
+  static std::vector<Axis> GetOutputReshapeType(const AnfNodePtr &node, size_t output_idx);
   // get output data type inferred by ME of anf node
   static TypeId GetOutputInferDataType(const AnfNodePtr &node, size_t output_idx);
   // get output original data type from prev node,input_index is the input index of current node related to prev node
diff --git a/mindspore/ccsrc/backend/session/session_basic.cc b/mindspore/ccsrc/backend/session/session_basic.cc
index e32bb4baef2..fadeeca49b9 100644
--- a/mindspore/ccsrc/backend/session/session_basic.cc
+++ b/mindspore/ccsrc/backend/session/session_basic.cc
@@ -66,12 +66,13 @@ tensor::TensorPtr CreateOutputTensor(const AnfNodePtr &node, size_t output_index
   if (type_id == kTypeUnknown) {
     type_id = AnfAlgo::GetOutputInferDataType(node, output_index);
   }
-  tensor::TensorPtr tensor;
+  tensor::TensorPtr tensor = nullptr;
   std::vector<int> temp_shape;
   if (graph->IsUniqueTargetInternalOutput(node, output_index)) {
     temp_shape.emplace_back(1);
     tensor = std::make_shared<tensor::Tensor>(type_id, temp_shape);
     tensor->set_device_address(address);
+    tensor->set_padding_type(AnfAlgo::GetOutputReshapeType(node, output_index));
     tensor->set_dirty(false);
     return tensor;
   }
@@ -86,6 +87,7 @@ tensor::TensorPtr CreateOutputTensor(const AnfNodePtr &node, size_t output_index
       graph->AddInternalOutputTensor(node, output_index, tensor);
     }
   }
+  tensor->set_padding_type(AnfAlgo::GetOutputReshapeType(node, output_index));
   // if in paynative mode,data only copyed to host when user want to print data
   auto ms_context = MsContext::GetInstance();
   MS_EXCEPTION_IF_NULL(ms_context);
@@ -240,6 +242,7 @@ ParameterPtr ConstructRunOpParameter(const std::shared_ptr<KernelGraph> &graph,
   } else {
     kernel_build_info_builder->SetOutputsFormat(std::vector<std::string>{device_address->format()});
     kernel_build_info_builder->SetOutputsDeviceType(std::vector<TypeId>{device_address->type_id()});
+    kernel_build_info_builder->SetOutputsReshapeType({input_tensor->padding_type()});
   }
   AnfAlgo::SetSelectKernelBuildInfo(kernel_build_info_builder->Build(), param.get());
   // construct abstract of parameter
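The session hunks above show the two ends of the new data path: CreateOutputTensor stamps the producing node's reshape type onto the tensor, and ConstructRunOpParameter later reads it back into a kernel build info. A minimal standalone sketch of that round trip, using hypothetical reduced stand-ins (TensorStub, BuildInfoBuilderStub) rather than the real tensor::Tensor and builder classes:

    #include <utility>
    #include <vector>

    enum Axis : int { N = 0, C, H, W };

    // Reduced stand-in for tensor::Tensor, showing only the new field.
    class TensorStub {
     public:
      void set_padding_type(std::vector<Axis> padding_type) { padding_type_ = std::move(padding_type); }
      std::vector<Axis> padding_type() const { return padding_type_; }

     private:
      std::vector<Axis> padding_type_;
    };

    // Reduced stand-in for the kernel build info builder used above.
    struct BuildInfoBuilderStub {
      std::vector<std::vector<Axis>> outputs_reshape_type;
      void SetOutputsReshapeType(std::vector<std::vector<Axis>> types) {
        outputs_reshape_type = std::move(types);
      }
    };

    int main() {
      // CreateOutputTensor side: record the producing node's reshape type.
      TensorStub output_tensor;
      output_tensor.set_padding_type({N, C});  // e.g. from GetOutputReshapeType(node, idx)

      // ConstructRunOpParameter side: feed the recorded type back into the build info.
      BuildInfoBuilderStub builder;
      builder.SetOutputsReshapeType({output_tensor.padding_type()});
      return 0;
    }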
a/mindspore/ccsrc/common/trans.cc b/mindspore/ccsrc/common/trans.cc
index 1b10a7d2f76..d6da517fce2 100644
--- a/mindspore/ccsrc/common/trans.cc
+++ b/mindspore/ccsrc/common/trans.cc
@@ -399,7 +399,7 @@ std::vector<int> GetRuntimePaddingShape(const AnfNodePtr &node, size_t index) {
   return shape;
 }

-std::vector<size_t> PaddingShapeTo4d(const std::vector<size_t> &shape, const std::vector<kernel::Axis> &padding_axis) {
+std::vector<size_t> PaddingShapeTo4d(const std::vector<size_t> &shape, const std::vector<Axis> &padding_axis) {
   if (padding_axis.empty() || shape.size() != padding_axis.size()) {
     return PaddingShapeTo4dByDefault(shape);
   }
diff --git a/mindspore/ccsrc/common/trans.h b/mindspore/ccsrc/common/trans.h
index 286c76afd0e..c815fbc31f8 100644
--- a/mindspore/ccsrc/common/trans.h
+++ b/mindspore/ccsrc/common/trans.h
@@ -51,8 +51,7 @@ size_t TypeIdSize(const TypeId data_type);
 size_t ShapeSize(const std::vector<size_t> &shape);
 size_t CubeSizeByType(const TypeId data_type);

-std::vector<size_t> PaddingShapeTo4d(const std::vector<size_t> &shape,
-                                     const std::vector<kernel::Axis> &padding_axis = {});
+std::vector<size_t> PaddingShapeTo4d(const std::vector<size_t> &shape, const std::vector<Axis> &padding_axis = {});
 std::vector<int> GetRuntimePaddingShape(const AnfNodePtr &node, size_t index);
 bool IsNeedPadding(const std::string &format, const size_t shape_size);
 std::vector<size_t> TransShapeToDevice(const std::vector<size_t> &shape, const std::string &format);
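PaddingShapeTo4d is where the Axis values earn their keep: each enumerator doubles as an NCHW slot index, so a reduced shape can be padded back to 4-D. The guard below is verbatim from the hunk above; the mapping loop and the default path are a sketch of the assumed behaviour, not the verbatim MindSpore implementation:

    #include <cstddef>
    #include <vector>

    enum Axis : int { N = 0, C, H, W };

    // Placeholder default path (assumption): right-align the shape into NCHW,
    // padding missing leading dims with 1. The real PaddingShapeTo4dByDefault
    // applies its own per-rank rules.
    std::vector<size_t> PaddingShapeTo4dByDefault(const std::vector<size_t> &shape) {
      std::vector<size_t> shape_4d(4, 1);
      for (size_t i = 0; i < shape.size() && shape.size() <= 4; ++i) {
        shape_4d[4 - shape.size() + i] = shape[i];
      }
      return shape_4d;
    }

    std::vector<size_t> PaddingShapeTo4d(const std::vector<size_t> &shape,
                                         const std::vector<Axis> &padding_axis = {}) {
      if (padding_axis.empty() || shape.size() != padding_axis.size()) {
        return PaddingShapeTo4dByDefault(shape);
      }
      std::vector<size_t> shape_4d(4, 1);  // NCHW; axes not mentioned stay 1
      for (size_t i = 0; i < padding_axis.size(); ++i) {
        shape_4d[padding_axis[i]] = shape[i];  // Axis value doubles as the NCHW slot
      }
      return shape_4d;
    }
    // e.g. shape {32, 16} with padding_axis {N, C} -> {32, 16, 1, 1}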
diff --git a/mindspore/core/ir/kernel_info_dev.h b/mindspore/core/ir/kernel_info_dev.h
index 70665a14713..9d20a9f67e3 100644
--- a/mindspore/core/ir/kernel_info_dev.h
+++ b/mindspore/core/ir/kernel_info_dev.h
@@ -20,6 +20,12 @@
 #include <memory>

 namespace mindspore {
+enum Axis : int {
+  N = 0,
+  C,
+  H,
+  W,
+};
 // Interface for device kernel program information.
 class KernelInfoDevice {
  public:
diff --git a/mindspore/core/ir/tensor.cc b/mindspore/core/ir/tensor.cc
index edcabc67be4..39340dc39a4 100644
--- a/mindspore/core/ir/tensor.cc
+++ b/mindspore/core/ir/tensor.cc
@@ -384,7 +384,8 @@ Tensor::Tensor(const Tensor &tensor)
       data_(tensor.data_),
       dirty_(tensor.dirty_),
       id_(tensor.id_),
-      device_sync_(tensor.device_sync_) {}
+      device_sync_(tensor.device_sync_),
+      padding_type_(tensor.padding_type()) {}

 Tensor::Tensor(const Tensor &tensor, TypeId data_type)
     : MetaTensor(data_type, tensor.shape_),
@@ -392,7 +393,8 @@ Tensor::Tensor(const Tensor &tensor, TypeId data_type)
       data_(MakeTensorData(data_type, tensor.shape_, tensor.data_->data(), tensor.data_type_)),
       dirty_(tensor.dirty_),
       id_(tensor.id_),
-      device_sync_(tensor.device_sync_) {}
+      device_sync_(tensor.device_sync_),
+      padding_type_(tensor.padding_type()) {}

 Tensor::Tensor(TypeId data_type, const std::vector<int> &shape, TensorDataPtr data)
     : MetaTensor(data_type, shape), data_(std::move(data)), id_(MakeId()) {}
@@ -441,6 +443,7 @@ Tensor &Tensor::AssignValue(const Tensor &tensor) {
     device_sync_ = tensor.device_sync_;
     data_ = tensor.data_;
     id_ = tensor.id_;
+    padding_type_ = tensor.padding_type_;
   }
   return *this;
 }
diff --git a/mindspore/core/ir/tensor.h b/mindspore/core/ir/tensor.h
index c61add5a23d..6db6147beb0 100644
--- a/mindspore/core/ir/tensor.h
+++ b/mindspore/core/ir/tensor.h
@@ -221,6 +221,8 @@ class Tensor : public MetaTensor {
   DeviceSyncPtr device_address() const { return device_sync_; }
   void set_device_address(const DeviceSyncPtr &device_sync) { device_sync_ = device_sync; }
+  void set_padding_type(std::vector<Axis> padding_type) { padding_type_ = padding_type; }
+  std::vector<Axis> padding_type() const { return padding_type_; }

   std::string id() const { return id_; }

@@ -230,6 +232,7 @@ class Tensor : public MetaTensor {
   bool dirty_{true};
   std::string id_{""};
   DeviceSyncPtr device_sync_{nullptr};
+  std::vector<Axis> padding_type_;
 };

 using TensorPtr = std::shared_ptr<Tensor>;
 using TensorPtrList = std::vector<std::shared_ptr<Tensor>>;
diff --git a/tests/ut/cpp/utils/signal_test.cc b/tests/ut/cpp/utils/signal_test.cc
index f8b5acd40f2..6ae0c081925 100644
--- a/tests/ut/cpp/utils/signal_test.cc
+++ b/tests/ut/cpp/utils/signal_test.cc
@@ -54,10 +54,10 @@ class A {
   std::shared_ptr<int> i;
 };

-class C : public A {
+class Ca : public A {
  public:
-  C() {}
-  explicit C(signals *sigs) : A(sigs) { printf("conn C:%p\n", this); }
+  Ca() {}
+  explicit Ca(signals *sigs) : A(sigs) { printf("conn C:%p\n", this); }
   void FuncA(int v1, float v2, std::string str) { printf("C: --%d--%f--%s--\n", v1, v2, str.c_str()); }
 };

@@ -71,13 +71,13 @@ class B : public A {
 TEST_F(TestSignal, test_common) {
   A objA;
   B objB;
-  C objC;
+  Ca objC;

   Signal<void (int, float, std::string)> signal;

   signal.connect(&objA, &A::FuncA);
   signal.connect(&objB, &B::FuncA);
-  signal.connect(&objC, &C::FuncA);
+  signal.connect(&objC, &Ca::FuncA);
   signal(20, 20, "Signal-Slot test");
 }

@@ -85,11 +85,11 @@ TEST_F(TestSignal, test_sigs) {
   signals sigs;
   A objA(&sigs);
   B objB(&sigs);
-  C objC(&sigs);
+  Ca objC(&sigs);

   sigs.signal.connect(&objA, &A::FuncA);
   sigs.signal.connect(&objB, &B::FuncA);
-  sigs.signal.connect(&objC, &C::FuncA);
+  sigs.signal.connect(&objC, &Ca::FuncA);
   sigs.signal(20, 20, "sigs Signal-Slot test");
 }

@@ -97,7 +97,7 @@ TEST_F(TestSignal, test_sigs_Named) {
   signals sigs;
   A objA(&sigs);
   B objB(&sigs);
-  C objC(&sigs);
+  Ca objC(&sigs);

   sigs.signal(10, 20, "Signal-Slot test");
   std::shared_ptr<A> a;
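The test-only rename from C to Ca is a side effect of the header move: Axis is an unscoped enum, so relocating it injects the enumerators N, C, H, W directly into namespace mindspore, where an unqualified use of the name C now resolves to the enumerator. A minimal standalone reproduction of the collision (not MindSpore code; that signal_test.cc lives inside namespace mindspore is an assumption):

    namespace mindspore {
    enum Axis : int { N = 0, C, H, W };  // unscoped: N, C, H, W land directly in mindspore

    class C {};   // still declarable, but its name is hidden by the enumerator C
    class Ca {};  // the patch's choice: a name that cannot collide

    void Demo() {
      // C hidden_obj;        // error: 'C' here names the enumerator mindspore::C
      class C elaborated_obj;  // the class stays reachable only via the elaborated form
      Ca renamed_obj;          // unambiguous, as in the test above
      (void)elaborated_obj;
      (void)renamed_obj;
    }
    }  // namespace mindspore

    int main() {
      mindspore::Demo();
      return 0;
    }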