From ef80c94aacd9eab870f0bf9e07ad0d8075142de9 Mon Sep 17 00:00:00 2001
From: sunsuodong
Date: Tue, 23 Nov 2021 19:03:24 -0800
Subject: [PATCH] template DelegateModel and fix c api

---
 include/api/delegate.h | 34 +++++++++++-----
 include/c_api/context_c.h | 8 ++--
 include/c_api/model_c.h | 9 ++++-
 include/c_api/tensor_c.h | 4 +-
 .../lite/examples/quick_start_c/build.bat | 26 ++++++++++++
 mindspore/lite/src/CMakeLists.txt | 5 ---
 mindspore/lite/src/c_api/context_c.cc | 14 ++++---
 mindspore/lite/src/c_api/model_c.cc | 20 ++++++++--
 mindspore/lite/src/c_api/tensor_c.cc | 5 ++-
 mindspore/lite/src/delegate/delegate.cc | 40 -------------------
 mindspore/lite/src/delegate/delegate_utils.h | 8 ++--
 .../lite/src/delegate/npu/npu_delegate.cc | 9 +++--
 .../lite/src/delegate/npu/npu_delegate.h | 6 +--
 .../delegate/tensorrt/tensorrt_delegate.cc | 7 ++--
 .../src/delegate/tensorrt/tensorrt_delegate.h | 6 +--
 mindspore/lite/src/scheduler.cc | 4 +-
 mindspore/lite/test/st/delegate_test.cc | 4 +-
 .../lite/test/ut/src/api/context_c_test.cc | 4 +-
 .../lite/test/ut/src/api/tensor_c_test.cc | 4 +-
 .../tools/cropper/build_cropper_config.sh | 1 -
 20 files changed, 120 insertions(+), 98 deletions(-)
 create mode 100644 mindspore/lite/examples/quick_start_c/build.bat
 delete mode 100644 mindspore/lite/src/delegate/delegate.cc

diff --git a/include/api/delegate.h b/include/api/delegate.h
index 4c1b28f9884..b17dc087f36 100644
--- a/include/api/delegate.h
+++ b/include/api/delegate.h
@@ -32,12 +32,14 @@ typedef enum {
 } SchemaVersion;
 
 using KernelIter = std::vector<kernel::Kernel *>::iterator;
+
+template <typename T>
 class MS_API DelegateModel {
  public:
   /// \brief Constructor of MindSpore Lite DelegateModel.
   DelegateModel(std::vector<kernel::Kernel *> *kernels, const std::vector<mindspore::MSTensor> &inputs,
-                const std::vector<mindspore::MSTensor> &outputs,
-                const std::map<kernel::Kernel *, const schema::Primitive *> &primitives, SchemaVersion version)
+                const std::vector<mindspore::MSTensor> &outputs, const std::map<kernel::Kernel *, const T *> &primitives,
+                SchemaVersion version)
       : kernels_(kernels), inputs_(inputs), outputs_(outputs), primitives_(primitives), version_(version) {}
 
   /// \brief Destructor of MindSpore Lite DelegateModel.
@@ -47,18 +49,24 @@ class MS_API DelegateModel {
   ///
   /// \param[in] a kernel in DelegateModel kernels vector.
   ///
-  /// \return The schema::Primitive of The kernel.
-  const schema::Primitive *GetPrimitive(kernel::Kernel *kernel) const;
+  /// \return The primitive of the kernel.
+  const T *GetPrimitive(kernel::Kernel *kernel) const {
+    if (primitives_.find(kernel) != primitives_.end()) {
+      return primitives_.at(kernel);
+    } else {
+      return nullptr;
+    }
+  }
 
   /// \brief Get the begin iterator of the DelegateModel kernels vector.
   ///
   /// \return The begin iterator of the DelegateModel kernels vector.
-  KernelIter BeginKernelIterator();
+  KernelIter BeginKernelIterator() { return kernels_->begin(); }
 
   /// \brief Get the end iterator of the DelegateModel kernels vector.
   ///
   /// \return The end iterator of the DelegateModel kernels vector.
-  KernelIter EndKernelIterator();
+  KernelIter EndKernelIterator() { return kernels_->end(); }
 
   /// \brief Replace the continuous kernel supported by the delegate with a delegate graph kernel.
   ///
   /// \param[in] from Define the begin iterator of continuous kernel supported by the delegate.
   /// \param[in] end Define the end iterator of continuous kernel supported by the delegate.
   ///
   /// \return The next iterator after graph_kernel, point to the next kernel that is not visited.
-  KernelIter Replace(KernelIter from, KernelIter end, kernel::Kernel *graph_kernel);
+  KernelIter Replace(KernelIter from, KernelIter end, kernel::Kernel *graph_kernel) {
+    size_t insert_index = from - BeginKernelIterator();
+    if (insert_index >= kernels_->size()) {
+      return BeginKernelIterator();
+    }
+    kernels_->erase(from, end);
+    kernels_->insert(BeginKernelIterator() + insert_index, graph_kernel);
+    return BeginKernelIterator() + insert_index + 1;
+  }
 
   /// \brief Get the input tensors of DelegateModel.
   ///
@@ -87,7 +103,7 @@ class MS_API DelegateModel {
   std::vector<kernel::Kernel *> *kernels_;
   const std::vector<mindspore::MSTensor> &inputs_;
   const std::vector<mindspore::MSTensor> &outputs_;
-  const std::map<kernel::Kernel *, const schema::Primitive *> &primitives_;
+  const std::map<kernel::Kernel *, const T *> &primitives_;
   SchemaVersion version_;
 };
 
@@ -111,7 +127,7 @@ class MS_API Delegate {
   /// \note Build willed be called in Model::Build.
   ///
   /// \param[in] model Define the delegate model to be built.
-  virtual Status Build(DelegateModel *model) = 0;
+  virtual Status Build(DelegateModel<schema::Primitive> *model) = 0;
 };
 } // namespace mindspore
 #endif // MINDSPORE_INCLUDE_API_DELEGATE_H
diff --git a/include/c_api/context_c.h b/include/c_api/context_c.h
index 82bc356d39e..980b55b6911 100644
--- a/include/c_api/context_c.h
+++ b/include/c_api/context_c.h
@@ -35,8 +35,8 @@ MS_API MSContextHandle MSContextCreate();
 
 /// \brief Destroy the context object.
 ///
-/// \param[in] context Context object handle.
-MS_API void MSContextDestroy(MSContextHandle context);
+/// \param[in] context Context object handle address.
+MS_API void MSContextDestroy(MSContextHandle *context);
 
 /// \brief Set the number of threads at runtime.
 ///
@@ -110,8 +110,8 @@ MS_API MSDeviceInfoHandle MSDeviceInfoCreate(MSDeviceType device_type);
 
 /// \brief Destroy the device info object.
 ///
-/// \param[in] device_info Device info object handle.
-MS_API void MSDeviceInfoDestroy(MSDeviceInfoHandle device_info);
+/// \param[in] device_info Device info object handle address.
+MS_API void MSDeviceInfoDestroy(MSDeviceInfoHandle *device_info);
 
 /// \brief Set provider's name.
 ///
diff --git a/include/c_api/model_c.h b/include/c_api/model_c.h
index 08cba27311d..ddd31b52a86 100644
--- a/include/c_api/model_c.h
+++ b/include/c_api/model_c.h
@@ -52,8 +52,15 @@ MS_API MSModelHandle MSModelCreate();
 
 /// \brief Destroy the model object. Only valid for Lite.
 ///
+/// \param[in] model Model object handle address.
+MS_API void MSModelDestroy(MSModelHandle *model);
+
+/// \brief Set workspace for the model object. Only valid for IoT.
+///
 /// \param[in] model Model object handle.
-MS_API void MSModelDestroy(MSModelHandle model);
+/// \param[in] workspace Define the workspace address.
+/// \param[in] workspace_size Define the workspace size.
+MS_API void MSModelSetWorkspace(MSModelHandle model, void *workspace, size_t workspace_size);
 
 /// \brief Build the model from model file buffer so that it can run on a device. Only valid for Lite.
 ///
diff --git a/include/c_api/tensor_c.h b/include/c_api/tensor_c.h
index 79e4330765f..9783bd901df 100644
--- a/include/c_api/tensor_c.h
+++ b/include/c_api/tensor_c.h
@@ -42,8 +42,8 @@ MS_API MSTensorHandle MSTensorCreate(const char *name, MSDataType type, const in
 
 /// \brief Destroy the tensor object.
 ///
-/// \param[in] tensor Tensor object handle.
-MS_API void MSTensorDestroy(MSTensorHandle tensor);
+/// \param[in] tensor Tensor object handle address.
+MS_API void MSTensorDestroy(MSTensorHandle *tensor);
 
 /// \brief Obtain a deep copy of the tensor.
/// diff --git a/mindspore/lite/examples/quick_start_c/build.bat b/mindspore/lite/examples/quick_start_c/build.bat new file mode 100644 index 00000000000..c4a2e72c091 --- /dev/null +++ b/mindspore/lite/examples/quick_start_c/build.bat @@ -0,0 +1,26 @@ +@rem Copyright 2021 Huawei Technologies Co., Ltd +@rem +@rem Licensed under the Apache License, Version 2.0 (the "License"); +@rem you may not use this file except in compliance with the License. +@rem You may obtain a copy of the License at +@rem +@rem http://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. +@rem ============================================================================ +@echo off +@title mindspore_lite_quick_start_c_demo_build + +SET BASEPATH=%CD% + +IF NOT EXIST "%BASEPATH%/build" ( + md build +) + +cd %BASEPATH%/build +cmake -G "CodeBlocks - MinGW Makefiles" %BASEPATH% +cmake --build . diff --git a/mindspore/lite/src/CMakeLists.txt b/mindspore/lite/src/CMakeLists.txt index c3ef943ea86..dee2d9186b1 100644 --- a/mindspore/lite/src/CMakeLists.txt +++ b/mindspore/lite/src/CMakeLists.txt @@ -197,11 +197,6 @@ set(LITE_SRC ${KERNEL_REG_SRC} ) -set(LITE_SRC - ${LITE_SRC} - ${CMAKE_CURRENT_SOURCE_DIR}/delegate/delegate.cc - ) - if(MSLITE_GPU_BACKEND STREQUAL opencl) file(GLOB_RECURSE OPENCL_RUNTIME_SRC ${CMAKE_CURRENT_SOURCE_DIR}/runtime/gpu/opencl/*.cc diff --git a/mindspore/lite/src/c_api/context_c.cc b/mindspore/lite/src/c_api/context_c.cc index 0edd7e2f2b5..59857dca482 100644 --- a/mindspore/lite/src/c_api/context_c.cc +++ b/mindspore/lite/src/c_api/context_c.cc @@ -31,10 +31,11 @@ MSContextHandle MSContextCreate() { return static_cast(impl); } -void MSContextDestroy(MSContextHandle context) { - if (context != nullptr) { - auto impl = static_cast(context); +void MSContextDestroy(MSContextHandle *context) { + if (*context != nullptr) { + auto impl = static_cast(*context); delete impl; + *context = nullptr; } } @@ -144,10 +145,11 @@ MSDeviceInfoHandle MSDeviceInfoCreate(MSDeviceType device_type) { return static_cast(impl); } -void MSDeviceInfoDestroy(MSDeviceInfoHandle device_info) { - if (device_info != nullptr) { - auto impl = static_cast(device_info); +void MSDeviceInfoDestroy(MSDeviceInfoHandle *device_info) { + if (*device_info != nullptr) { + auto impl = static_cast(*device_info); delete impl; + *device_info = nullptr; } } diff --git a/mindspore/lite/src/c_api/model_c.cc b/mindspore/lite/src/c_api/model_c.cc index b73a57ee08c..f1f37d593d0 100644 --- a/mindspore/lite/src/c_api/model_c.cc +++ b/mindspore/lite/src/c_api/model_c.cc @@ -309,19 +309,29 @@ MSModelHandle MSModelCreate() { return static_cast(impl); } -void MSModelDestroy(MSModelHandle model) { - if (model != nullptr) { - auto impl = static_cast(model); +void MSModelDestroy(MSModelHandle *model) { + if (*model != nullptr) { + auto impl = static_cast(*model); delete impl; + *model = nullptr; } } +void MSModelSetWorkspace(MSModelHandle model, void *workspace, size_t workspace_size) { + MS_LOG(ERROR) << "Unsupported Feature."; + return; +} + MSStatus MSModelBuild(MSModelHandle model, const void *model_data, size_t data_size, MSModelType model_type, const MSContextHandle model_context) { if (model == nullptr || model_data == nullptr || 
model_context == nullptr) { MS_LOG(ERROR) << "param is nullptr."; return kMSStatusLiteNullptr; } + if (model_type == kMSModelTypeInvalid) { + MS_LOG(ERROR) << "param is invalid."; + return kMSStatusLiteParamInvalid; + } mindspore::Context::Data *context = static_cast(model_context); auto impl = static_cast(model); auto ret = impl->Build(model_data, data_size, static_cast(model_type), context); @@ -334,6 +344,10 @@ MSStatus MSModelBuildFromFile(MSModelHandle model, const char *model_path, MSMod MS_LOG(ERROR) << "param is nullptr."; return kMSStatusLiteNullptr; } + if (model_type == kMSModelTypeInvalid) { + MS_LOG(ERROR) << "param is invalid."; + return kMSStatusLiteParamInvalid; + } mindspore::Context::Data *context = static_cast(model_context); auto impl = static_cast(model); auto ret = impl->Build(model_path, static_cast(model_type), context); diff --git a/mindspore/lite/src/c_api/tensor_c.cc b/mindspore/lite/src/c_api/tensor_c.cc index 41948e8c59c..e24db584c41 100644 --- a/mindspore/lite/src/c_api/tensor_c.cc +++ b/mindspore/lite/src/c_api/tensor_c.cc @@ -40,10 +40,11 @@ MSTensorHandle MSTensorCreate(const char *name, MSDataType type, const int64_t * return impl; } -void MSTensorDestroy(MSTensorHandle tensor) { - auto impl = static_cast(tensor); +void MSTensorDestroy(MSTensorHandle *tensor) { + auto impl = static_cast(*tensor); if (impl != nullptr) { delete impl; + *tensor = nullptr; } } diff --git a/mindspore/lite/src/delegate/delegate.cc b/mindspore/lite/src/delegate/delegate.cc deleted file mode 100644 index a4f16e3fc83..00000000000 --- a/mindspore/lite/src/delegate/delegate.cc +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "include/api/delegate.h" -namespace mindspore { -const schema::Primitive *DelegateModel::GetPrimitive(kernel::Kernel *kernel) const { - if (primitives_.find(kernel) != primitives_.end()) { - return primitives_.at(kernel); - } else { - return nullptr; - } -} - -KernelIter DelegateModel::BeginKernelIterator() { return kernels_->begin(); } - -KernelIter DelegateModel::EndKernelIterator() { return kernels_->end(); } - -KernelIter DelegateModel::Replace(KernelIter from, KernelIter end, kernel::Kernel *graph_kernel) { - size_t insert_index = from - BeginKernelIterator(); - if (insert_index >= kernels_->size()) { - return BeginKernelIterator(); - } - kernels_->erase(from, end); - kernels_->insert(BeginKernelIterator() + insert_index, graph_kernel); - return BeginKernelIterator() + insert_index + 1; -} -} // namespace mindspore diff --git a/mindspore/lite/src/delegate/delegate_utils.h b/mindspore/lite/src/delegate/delegate_utils.h index d370931fb30..e44c4fdeed1 100644 --- a/mindspore/lite/src/delegate/delegate_utils.h +++ b/mindspore/lite/src/delegate/delegate_utils.h @@ -87,8 +87,8 @@ std::vector GetGraphOutTensors(const std::vector &ops) } template -std::vector GraphInTensors(const std::vector &ops, DelegateModel *model, KernelIter from, - KernelIter end) { +std::vector GraphInTensors(const std::vector &ops, DelegateModel *model, + KernelIter from, KernelIter end) { auto in_tensors = GetGraphInTensors(ops); std::vector all_in_tensors; for (auto op : ops) { @@ -114,8 +114,8 @@ std::vector GraphInTensors(const std::vector &ops, Del } template -std::vector GraphOutTensors(const std::vector &ops, DelegateModel *model, KernelIter from, - KernelIter end) { +std::vector GraphOutTensors(const std::vector &ops, DelegateModel *model, + KernelIter from, KernelIter end) { auto out_tensors = GetGraphOutTensors(ops); std::vector all_out_tensors; for (auto op : ops) { diff --git a/mindspore/lite/src/delegate/npu/npu_delegate.cc b/mindspore/lite/src/delegate/npu/npu_delegate.cc index 6c3b9d96e39..fd4ed74f85b 100644 --- a/mindspore/lite/src/delegate/npu/npu_delegate.cc +++ b/mindspore/lite/src/delegate/npu/npu_delegate.cc @@ -169,7 +169,7 @@ Status NPUDelegate::Init() { return mindspore::kSuccess; } -Status NPUDelegate::Build(DelegateModel *model) { +Status NPUDelegate::Build(DelegateModel *model) { KernelIter from, end; std::vector npu_ops; int graph_index = 0; @@ -269,7 +269,8 @@ NPUOp *NPUDelegate::GetOP(kernel::Kernel *kernel, const schema::Primitive *primi return npu_op; } -std::vector GraphOutTensors(const std::vector &ops, DelegateModel *model, KernelIter from, +std::vector GraphOutTensors(const std::vector &ops, + DelegateModel *model, KernelIter from, KernelIter end) { auto out_tensors = lite::GetGraphOutTensors(ops); std::vector all_out_tensors; @@ -296,8 +297,8 @@ std::vector GraphOutTensors(const std::vector &ops return out_tensors; } -kernel::Kernel *NPUDelegate::CreateNPUGraph(const std::vector &ops, DelegateModel *model, KernelIter from, - KernelIter end) { +kernel::Kernel *NPUDelegate::CreateNPUGraph(const std::vector &ops, DelegateModel *model, + KernelIter from, KernelIter end) { auto in_tensors = lite::GetGraphInTensors(ops); auto out_tensors = GraphOutTensors(ops, model, from, end); auto graph_kernel = new (std::nothrow) NPUGraph(ops, npu_manager_, in_tensors, out_tensors); diff --git a/mindspore/lite/src/delegate/npu/npu_delegate.h b/mindspore/lite/src/delegate/npu/npu_delegate.h index 700c0f42d6c..41ef09c22ec 100644 --- a/mindspore/lite/src/delegate/npu/npu_delegate.h +++ 
b/mindspore/lite/src/delegate/npu/npu_delegate.h @@ -34,13 +34,13 @@ class NPUDelegate : public Delegate { Status Init() override; - Status Build(DelegateModel *model) override; + Status Build(DelegateModel *model) override; protected: NPUOp *GetOP(kernel::Kernel *kernel, const schema::Primitive *primitive); - kernel::Kernel *CreateNPUGraph(const std::vector &ops, DelegateModel *model, KernelIter from, - KernelIter end); + kernel::Kernel *CreateNPUGraph(const std::vector &ops, DelegateModel *model, + KernelIter from, KernelIter end); NPUManager *npu_manager_ = nullptr; NPUPassManager *pass_manager_ = nullptr; diff --git a/mindspore/lite/src/delegate/tensorrt/tensorrt_delegate.cc b/mindspore/lite/src/delegate/tensorrt/tensorrt_delegate.cc index 260bba4b5af..b9027d3e5cd 100644 --- a/mindspore/lite/src/delegate/tensorrt/tensorrt_delegate.cc +++ b/mindspore/lite/src/delegate/tensorrt/tensorrt_delegate.cc @@ -135,7 +135,7 @@ Status TensorRTDelegate::Init() { return mindspore::kSuccess; } -Status TensorRTDelegate::Build(DelegateModel *model) { +Status TensorRTDelegate::Build(DelegateModel *model) { int ret = lite::SetCudaDevice(device_info_); if (ret != RET_OK) { return mindspore::kLiteError; @@ -209,8 +209,9 @@ TensorRTOp *TensorRTDelegate::FindTensorRTOp(kernel::Kernel *kernel, const schem } } -TensorRTSubGraph *TensorRTDelegate::CreateTensorRTGraph(const std::vector &ops, DelegateModel *model, - KernelIter from, KernelIter end) { +TensorRTSubGraph *TensorRTDelegate::CreateTensorRTGraph(const std::vector &ops, + DelegateModel *model, KernelIter from, + KernelIter end) { auto in_tensors = GraphInTensors(ops, model, from, end); auto out_tensors = GraphOutTensors(ops, model, from, end); auto *tensorrt_graph = new (std::nothrow) TensorRTSubGraph(ops, in_tensors, out_tensors, context_, device_info_, diff --git a/mindspore/lite/src/delegate/tensorrt/tensorrt_delegate.h b/mindspore/lite/src/delegate/tensorrt/tensorrt_delegate.h index c85039709b7..af586d7eaaa 100644 --- a/mindspore/lite/src/delegate/tensorrt/tensorrt_delegate.h +++ b/mindspore/lite/src/delegate/tensorrt/tensorrt_delegate.h @@ -40,13 +40,13 @@ class TensorRTDelegate : public Delegate { Status Init() override; - Status Build(DelegateModel *model) override; + Status Build(DelegateModel *model) override; private: TensorRTOp *FindTensorRTOp(kernel::Kernel *kernel, const schema::Primitive *primitive); - TensorRTSubGraph *CreateTensorRTGraph(const std::vector &ops, DelegateModel *model, KernelIter from, - KernelIter end); + TensorRTSubGraph *CreateTensorRTGraph(const std::vector &ops, DelegateModel *model, + KernelIter from, KernelIter end); std::unordered_map op_func_lists_; diff --git a/mindspore/lite/src/scheduler.cc b/mindspore/lite/src/scheduler.cc index 195970e0abd..dab20d4d51f 100644 --- a/mindspore/lite/src/scheduler.cc +++ b/mindspore/lite/src/scheduler.cc @@ -402,8 +402,8 @@ int Scheduler::ReplaceDelegateKernels(std::vector *dst_ker ms_inputs_ = LiteTensorsToMSTensors(inputs_); ms_outputs_ = LiteTensorsToMSTensors(outputs_); auto schema_version = static_cast(schema_version_); - DelegateModel *model = - new (std::nothrow) DelegateModel(&kernels, ms_inputs_, ms_outputs_, primitives_, schema_version); + DelegateModel *model = + new (std::nothrow) DelegateModel(&kernels, ms_inputs_, ms_outputs_, primitives_, schema_version); if (model == nullptr) { MS_LOG(ERROR) << "New delegate model failed."; return RET_NULL_PTR; diff --git a/mindspore/lite/test/st/delegate_test.cc b/mindspore/lite/test/st/delegate_test.cc index 
41fa186f7ad..6a6aa59fe33 100644 --- a/mindspore/lite/test/st/delegate_test.cc +++ b/mindspore/lite/test/st/delegate_test.cc @@ -62,10 +62,10 @@ class CustomDelegate : public Delegate { Status Init() override { return mindspore::kSuccess; } - Status Build(DelegateModel *model) override; + Status Build(DelegateModel *model) override; }; -Status CustomDelegate::Build(DelegateModel *model) { +Status CustomDelegate::Build(DelegateModel *model) { auto graph_kernel = new (std::nothrow) CustomSubgraph(model->inputs(), model->outputs()); if (graph_kernel == nullptr) { return mindspore::kLiteNullptr; diff --git a/mindspore/lite/test/ut/src/api/context_c_test.cc b/mindspore/lite/test/ut/src/api/context_c_test.cc index cfcb63da333..18ee04e43a4 100644 --- a/mindspore/lite/test/ut/src/api/context_c_test.cc +++ b/mindspore/lite/test/ut/src/api/context_c_test.cc @@ -60,7 +60,7 @@ TEST_F(ContextCTest, common_test) { ASSERT_EQ(MSContextGetEnableParallel(context), true); MSDeviceInfoHandle cpu_device_info = MSDeviceInfoCreate(kMSDeviceTypeCPU); - MSDeviceInfoDestroy(cpu_device_info); + MSDeviceInfoDestroy(&cpu_device_info); cpu_device_info = MSDeviceInfoCreate(kMSDeviceTypeCPU); MSDeviceInfoSetEnableFP16(cpu_device_info, true); @@ -68,6 +68,6 @@ TEST_F(ContextCTest, common_test) { MSContextAddDeviceInfo(context, cpu_device_info); MSContextAddDeviceInfo(context, npu_device_info); - MSContextDestroy(context); + MSContextDestroy(&context); } } // namespace mindspore diff --git a/mindspore/lite/test/ut/src/api/tensor_c_test.cc b/mindspore/lite/test/ut/src/api/tensor_c_test.cc index fffd764f593..bb352003a30 100644 --- a/mindspore/lite/test/ut/src/api/tensor_c_test.cc +++ b/mindspore/lite/test/ut/src/api/tensor_c_test.cc @@ -83,7 +83,7 @@ TEST_F(TensorCTest, common_test) { ASSERT_EQ(MSTensorGetDataSize(clone), MSTensorGetDataSize(tensor)); ASSERT_TRUE(MSTensorGetData(clone) != MSTensorGetData(tensor)); - MSTensorDestroy(tensor); - MSTensorDestroy(clone); + MSTensorDestroy(&tensor); + MSTensorDestroy(&clone); } } // namespace mindspore diff --git a/mindspore/lite/tools/cropper/build_cropper_config.sh b/mindspore/lite/tools/cropper/build_cropper_config.sh index 43b58817612..123b3433445 100644 --- a/mindspore/lite/tools/cropper/build_cropper_config.sh +++ b/mindspore/lite/tools/cropper/build_cropper_config.sh @@ -302,7 +302,6 @@ chmod 444 ${GPU_MAPPING_OUTPUT_FILE} # support for npu npu_files=() -while IFS='' read -r line; do npu_files+=("$line"); done < <(ls mindspore/lite/src/delegate/delegate.cc) while IFS='' read -r line; do npu_files+=("$line"); done < <(ls mindspore/lite/src/delegate/npu/*.cc) while IFS='' read -r line; do npu_files+=("$line"); done < <(ls mindspore/lite/src/delegate/npu/op/*.cc) while IFS='' read -r line; do npu_files+=("$line"); done < <(ls mindspore/lite/src/delegate/npu/pass/*.cc)
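
For reviewers who want to exercise the new interface, the sketch below shows a minimal delegate written against the templated DelegateModel<schema::Primitive> added in include/api/delegate.h above. It is modeled on the CustomDelegate in mindspore/lite/test/st/delegate_test.cc from this patch; the class name InspectingDelegate and the count-only Build body are illustrative assumptions, not code introduced by the patch.

#include "include/api/delegate.h"

namespace mindspore {
// Illustrative delegate: it iterates the kernel list, asks the templated
// GetPrimitive() for each kernel's schema::Primitive, and counts how many
// kernels it recognizes. A real backend would collect each contiguous run of
// supported kernels and fuse it into one graph kernel with
// model->Replace(from, end, graph_kernel), as the NPU and TensorRT delegates
// in this patch do.
class InspectingDelegate : public Delegate {
 public:
  Status Init() override { return kSuccess; }

  Status Build(DelegateModel<schema::Primitive> *model) override {
    size_t known = 0;
    for (KernelIter iter = model->BeginKernelIterator(); iter != model->EndKernelIterator(); ++iter) {
      if (model->GetPrimitive(*iter) != nullptr) {
        ++known;
      }
    }
    (void)known;  // nothing is replaced, so the scheduler keeps the default CPU kernels
    return kSuccess;
  }
};
}  // namespace mindspore

A delegate of this shape is handed to the runtime through the Context before Model::Build runs, which is also the point where Delegate::Build is invoked (see the \note on Build in delegate.h); the exact attachment call is outside the scope of this patch.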