From 4a7571ad454d91b330120611f8154996ec0bf097 Mon Sep 17 00:00:00 2001
From: ling
Date: Tue, 31 Jan 2023 10:42:02 +0800
Subject: [PATCH] [LITE] litekernel support kernelmod

---
 cmake/package_lite.cmake                      |  2 -
 mindspore/lite/src/extendrt/CMakeLists.txt    |  1 +
 .../lite/src/extendrt/lite_kernel_mod.cc      | 99 +++++++++++++++++++
 mindspore/lite/src/extendrt/lite_kernel_mod.h | 47 +++++++++
 .../mindir_model/inner_kernel.cc              | 62 ++----------
 .../mindir_loader/mindir_model/inner_kernel.h |  7 +-
 .../lite/src/extendrt/utils/tensor_utils.cc   | 51 ++++++++++
 .../lite/src/extendrt/utils/tensor_utils.h    | 15 +++
 .../kernel/cpu/fp32_grad/resize_grad.cc       |  7 +-
 .../litert/kernel/cpu/fp32_grad/resize_grad.h |  3 +
 mindspore/lite/src/litert/lite_kernel.cc      |  3 +-
 .../cloud_infer/run_benchmark_cloud_ascend.sh |  1 +
 .../run_benchmark_x86_cloud_cpu.sh            |  5 +-
 ...nchmark_server_inference_tensorrt_cloud.sh |  5 +-
 .../st/scripts/run_benchmark_x86_cloud.sh     | 10 +-
 mindspore/lite/tools/converter/CMakeLists.txt | 27 +++++
 .../lite/tools/optimizer/common/helper.cc     |  6 ++
 17 files changed, 275 insertions(+), 76 deletions(-)
 create mode 100644 mindspore/lite/src/extendrt/lite_kernel_mod.cc
 create mode 100644 mindspore/lite/src/extendrt/lite_kernel_mod.h

diff --git a/cmake/package_lite.cmake b/cmake/package_lite.cmake
index b422887e092..1a5aca171e5 100644
--- a/cmake/package_lite.cmake
+++ b/cmake/package_lite.cmake
@@ -845,8 +845,6 @@ else()
                 DESTINATION ${RUNTIME_LIB_DIR} COMPONENT ${RUNTIME_COMPONENT_NAME})
         install(FILES ${glog_LIBPATH}/libmindspore_glog.so.0.4.0 DESTINATION ${RUNTIME_LIB_DIR}
                 RENAME libmindspore_glog.so.0 COMPONENT ${RUNTIME_COMPONENT_NAME})
-        install(FILES ${onednn_LIBPATH}/libdnnl.so.2.2 DESTINATION ${RUNTIME_LIB_DIR}
-                RENAME libdnnl.so.2 COMPONENT ${RUNTIME_COMPONENT_NAME})
         install(FILES ${onednn_LIBPATH}/libdnnl.so.2.2 DESTINATION ${DNNL_DIR}
                 RENAME libdnnl.so.2 COMPONENT ${RUNTIME_COMPONENT_NAME})
         install(TARGETS mindspore_core DESTINATION ${RUNTIME_LIB_DIR} COMPONENT ${RUNTIME_COMPONENT_NAME})
diff --git a/mindspore/lite/src/extendrt/CMakeLists.txt b/mindspore/lite/src/extendrt/CMakeLists.txt
index d89e01c8ad9..f3ffeef6b5e 100644
--- a/mindspore/lite/src/extendrt/CMakeLists.txt
+++ b/mindspore/lite/src/extendrt/CMakeLists.txt
@@ -27,6 +27,7 @@ if(MSLITE_ENABLE_CLOUD_FUSION_INFERENCE OR MSLITE_ENABLE_CLOUD_INFERENCE)
         ${CMAKE_CURRENT_SOURCE_DIR}/../common/graph_util.cc
         ${CMAKE_CURRENT_SOURCE_DIR}/../common/config_infos.cc
         ${CMAKE_CURRENT_SOURCE_DIR}/../common/config_file.cc
+        ${CMAKE_CURRENT_SOURCE_DIR}/lite_kernel_mod.cc
         ${CMAKE_CURRENT_SOURCE_DIR}/subgraph_kernel.cc
         ${CMAKE_CURRENT_SOURCE_DIR}/numa_adapter.cc
         ${CMAKE_CURRENT_SOURCE_DIR}/kernel/cpu/less_test_kernel_mod.cc
diff --git a/mindspore/lite/src/extendrt/lite_kernel_mod.cc b/mindspore/lite/src/extendrt/lite_kernel_mod.cc
new file mode 100644
index 00000000000..cf2297d7e52
--- /dev/null
+++ b/mindspore/lite/src/extendrt/lite_kernel_mod.cc
@@ -0,0 +1,99 @@
+/**
+ * Copyright 2023 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "src/extendrt/lite_kernel_mod.h"
+#include <memory>
+#include "plugin/factory/ms_factory.h"
+#include "plugin/device/cpu/kernel/cpu_kernel.h"
+#include "src/extendrt/utils/tensor_utils.h"
+
+using mindspore::lite::RET_ERROR;
+using mindspore::lite::RET_OK;
+
+namespace mindspore::kernel {
+int LiteKernelMod::Prepare() {
+  if (!InferShapeDone()) {
+    return RET_OK;
+  }
+  auto inputs = CloudTensorUtils::LiteTensorToKernelTensorPtrVec(in_tensors_);
+  auto outputs = CloudTensorUtils::LiteTensorToKernelTensorPtrVec(out_tensors_);
+
+  bool ret = kernel_mod_->Init(this->base_operator_, inputs, outputs);
+  return ret ? ReSize() : RET_ERROR;
+}
+
+int LiteKernelMod::ReSize() {
+  auto inputs = CloudTensorUtils::LiteTensorToKernelTensorPtrVec(in_tensors_);
+  auto outputs = CloudTensorUtils::LiteTensorToKernelTensorPtrVec(out_tensors_);
+  return kernel_mod_->Resize(base_operator_, inputs, outputs);
+}
+
+int LiteKernelMod::Run() {
+  auto inputs = CloudTensorUtils::LiteTensorToAddressPtrVec(in_tensors_);
+  auto outputs = CloudTensorUtils::LiteTensorToAddressPtrVec(out_tensors_);
+
+  AddressPtrList workspace;
+  auto workspace_size = kernel_mod_->GetWorkspaceSizeList();
+  for (size_t i = 0; i < workspace_size.size(); i++) {
+    auto buffer = ms_context_->allocator->Malloc(workspace_size.at(i));
+    std::shared_ptr<Address> address = std::make_shared<Address>(buffer, workspace_size.at(i));
+    workspace.push_back(address);
+  }
+
+  auto ret = kernel_mod_->Launch(inputs, workspace, outputs, nullptr);
+
+  for (auto address : workspace) {
+    ms_context_->allocator->Free(address->addr);
+  }
+
+  return ret ? RET_OK : RET_ERROR;
+}
+
+std::shared_ptr<LiteKernelMod> LiteKernelModRegistry(BaseOperatorPtr base_operator,
+                                                     std::vector<lite::Tensor *> in_tensors,
+                                                     std::vector<lite::Tensor *> out_tensors,
+                                                     const lite::InnerContext *ctx) {
+  std::string op_type = base_operator->name();
+
+  std::shared_ptr<NativeCpuKernelMod> kernel_mod =
+    mindspore::kernel::Factory<NativeCpuKernelMod>::Instance().Create(op_type);
+  if (kernel_mod == nullptr) {
+    MS_LOG(ERROR) << "Create kernel mod failed. kernel: " << op_type;
+    return nullptr;
+  }
+
+  auto lite_kernel_mod =
+    std::make_shared<LiteKernelMod>(kernel_mod, base_operator, in_tensors, out_tensors, ctx);
+
+  return lite_kernel_mod;
+}
+
+kernel::KernelExec *FindKernelMod(BaseOperatorPtr base_operator, std::vector<lite::Tensor *> in_tensors,
+                                  std::vector<lite::Tensor *> out_tensors, const lite::InnerContext *ctx) {
+  std::shared_ptr<LiteKernelMod> lite_kernel_mod =
+    mindspore::kernel::LiteKernelModRegistry(base_operator, in_tensors, out_tensors, ctx);
+  if (lite_kernel_mod == nullptr) {
+    MS_LOG(ERROR) << "Create lite kernel mod failed. kernel name: " << base_operator->name();
+    return nullptr;
+  }
+
+  kernel::KernelExec *kernel_exec = new kernel::KernelExec(lite_kernel_mod);
+  auto desc = kernel_exec->desc();
+  desc.data_type = in_tensors.front()->data_type();
+  kernel_exec->set_desc(desc);
+  return kernel_exec;
+}
+}  // namespace mindspore::kernel
diff --git a/mindspore/lite/src/extendrt/lite_kernel_mod.h b/mindspore/lite/src/extendrt/lite_kernel_mod.h
new file mode 100644
index 00000000000..0aa0526e22a
--- /dev/null
+++ b/mindspore/lite/src/extendrt/lite_kernel_mod.h
@@ -0,0 +1,47 @@
+/**
+ * Copyright 2023 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MINDSPORE_LITE_SRC_LITERT_LITE_KERNEL_MOD_H_
+#define MINDSPORE_LITE_SRC_LITERT_LITE_KERNEL_MOD_H_
+
+#include <memory>
+#include <vector>
+#include "src/litert/lite_kernel.h"
+#include "src/litert/kernel_exec.h"
+#include "kernel/kernel.h"
+#include "include/model.h"
+
+namespace mindspore::kernel {
+class LiteKernelMod : public LiteKernel {
+ public:
+  explicit LiteKernelMod(std::shared_ptr<kernel::KernelMod> kernel_mod,
+                         kernel::BaseOperatorPtr base_operator, std::vector<lite::Tensor *> in_tensors,
+                         std::vector<lite::Tensor *> out_tensors, const lite::InnerContext *ctx)
+      : LiteKernel(nullptr, in_tensors, out_tensors, ctx), kernel_mod_(kernel_mod), base_operator_(base_operator) {}
+  ~LiteKernelMod() override = default;
+
+  int Prepare() override;
+  int ReSize() override;
+  int Run() override;
+
+ private:
+  KernelModPtr kernel_mod_;
+  BaseOperatorPtr base_operator_;
+};
+
+kernel::KernelExec *FindKernelMod(BaseOperatorPtr base_operator, std::vector<lite::Tensor *> in_tensors,
+                                  std::vector<lite::Tensor *> out_tensors, const lite::InnerContext *ctx);
+}  // namespace mindspore::kernel
+#endif  // MINDSPORE_LITE_SRC_LITERT_LITE_KERNEL_MOD_H_
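Example (not part of the patch): a minimal caller-side sketch of the new entry point. The BuildKernelExec wrapper name and the origin of its arguments are assumptions for illustration only.

// C++ sketch: wrap a cloud KernelMod into a KernelExec the lite scheduler can run.
#include "src/extendrt/lite_kernel_mod.h"

mindspore::kernel::KernelExec *BuildKernelExec(mindspore::kernel::BaseOperatorPtr base_operator,
                                               const std::vector<mindspore::lite::Tensor *> &inputs,
                                               const std::vector<mindspore::lite::Tensor *> &outputs,
                                               const mindspore::lite::InnerContext *ctx) {
  // FindKernelMod resolves the op by name in the KernelMod factory; a nullptr
  // result means no KernelMod backend exists for this op, and the caller
  // should fall back to the ordinary lite kernel path.
  return mindspore::kernel::FindKernelMod(base_operator, inputs, outputs, ctx);
}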
diff --git a/mindspore/lite/src/extendrt/mindir_loader/mindir_model/inner_kernel.cc b/mindspore/lite/src/extendrt/mindir_loader/mindir_model/inner_kernel.cc
index 76aa74bcc17..8b228d366fe 100644
--- a/mindspore/lite/src/extendrt/mindir_loader/mindir_model/inner_kernel.cc
+++ b/mindspore/lite/src/extendrt/mindir_loader/mindir_model/inner_kernel.cc
@@ -22,16 +22,16 @@
 namespace mindspore::kernel {
 int InnerKernel::Prepare() {
-  auto inputs = LiteTensorToKernelTensorPtrVec(this->in_tensors_);
-  auto outputs = LiteTensorToKernelTensorPtrVec(this->out_tensors_);
+  auto inputs = CloudTensorUtils::LiteTensorToKernelTensorPtrVec(this->in_tensors_);
+  auto outputs = CloudTensorUtils::LiteTensorToKernelTensorPtrVec(this->out_tensors_);
 
   return this->kernel_mod_->Init(this->base_operator_, inputs, outputs) ? mindspore::lite::RET_OK
                                                                         : mindspore::lite::RET_ERROR;
 }
 
 int InnerKernel::Execute() {
-  auto inputs = LiteTensorToAddressPtrVec(this->in_tensors_);
-  auto outputs = LiteTensorToAddressPtrVec(this->out_tensors_);
+  auto inputs = CloudTensorUtils::LiteTensorToAddressPtrVec(this->in_tensors_);
+  auto outputs = CloudTensorUtils::LiteTensorToAddressPtrVec(this->out_tensors_);
 
   std::vector<AddressPtr> workspace;
 
@@ -41,60 +41,10 @@ int InnerKernel::Execute() {
 
 int InnerKernel::ReSize() {
   // use InitOp instead
-  auto inputs = LiteTensorToKernelTensorPtrVec(this->in_tensors_);
-  auto outputs = LiteTensorToKernelTensorPtrVec(this->out_tensors_);
+  auto inputs = CloudTensorUtils::LiteTensorToKernelTensorPtrVec(this->in_tensors_);
+  auto outputs = CloudTensorUtils::LiteTensorToKernelTensorPtrVec(this->out_tensors_);
 
   return this->kernel_mod_->Init(this->base_operator_, inputs, outputs) ? mindspore::lite::RET_OK
                                                                         : mindspore::lite::RET_ERROR;
 }
-
-std::vector<KernelTensorPtr> InnerKernel::LiteTensorToKernelTensorPtrVec(
-  const std::vector<lite::Tensor *> &lite_tensors) {
-  std::vector<KernelTensorPtr> ret_vec;
-
-  for (auto lite_tensor : lite_tensors) {
-    auto kernel_tensor_ptr = LiteTensorToKernelTensorPtr(lite_tensor);
-    ret_vec.push_back(kernel_tensor_ptr);
-  }
-
-  return ret_vec;
-}
-
-KernelTensorPtr InnerKernel::LiteTensorToKernelTensorPtr(lite::Tensor *lite_tensor) {
-  KernelTensorPtr kernel_tensor_ptr = std::make_shared<KernelTensor>();
-  auto address_ptr = LiteTensorToAddressPtr(lite_tensor);
-  kernel_tensor_ptr->SetData(address_ptr);
-  kernel_tensor_ptr->SetFormat(lite_tensor->format());
-
-  auto lite_shape = lite_tensor->shape();
-  std::vector<int64_t> shape;
-  for (size_t i = 0; i < lite_shape.size(); i++) {
-    shape.push_back(lite_shape[i]);
-  }
-
-  auto kernel_tensor_abstract_ptr = std::make_shared<mindspore::abstract::AbstractTensor>(
-    mindspore::TypeIdToType(lite_tensor->data_type()), std::make_shared<mindspore::abstract::Shape>(shape));
-  kernel::TensorInfo info;
-  info.base_ = kernel_tensor_abstract_ptr;
-  kernel_tensor_ptr->SetTensorInfo(info);
-  return kernel_tensor_ptr;
-}
-
-std::vector<AddressPtr> InnerKernel::LiteTensorToAddressPtrVec(const std::vector<lite::Tensor *> &lite_tensors) {
-  std::vector<AddressPtr> ret_vec;
-
-  for (auto lite_tensor : lite_tensors) {
-    auto address_ptr = LiteTensorToAddressPtr(lite_tensor);
-    ret_vec.push_back(address_ptr);
-  }
-
-  return ret_vec;
-}
-
-AddressPtr InnerKernel::LiteTensorToAddressPtr(lite::Tensor *lite_tensor) {
-  AddressPtr address_ptr = std::make_shared<Address>();
-  address_ptr->addr = lite_tensor->data();
-  address_ptr->size = lite_tensor->Size();
-  return address_ptr;
-}
 }  // namespace mindspore::kernel
diff --git a/mindspore/lite/src/extendrt/mindir_loader/mindir_model/inner_kernel.h b/mindspore/lite/src/extendrt/mindir_loader/mindir_model/inner_kernel.h
index 47da9d0523c..3791b44887f 100644
--- a/mindspore/lite/src/extendrt/mindir_loader/mindir_model/inner_kernel.h
+++ b/mindspore/lite/src/extendrt/mindir_loader/mindir_model/inner_kernel.h
@@ -28,6 +28,7 @@
 // #include "include/api/context.h"
 #include "kernel/kernel.h"
 #include "extendrt/mindir_loader/abstract_kernel.h"
+#include "src/extendrt/utils/tensor_utils.h"
 
 using mindspore::infer::Abstractkernel;
 
@@ -89,12 +90,6 @@ class InnerKernel : public Abstractkernel {
 
   const std::vector<lite::Tensor *> &out_tensors() const override { return out_tensors_; }
 
- private:
-  std::vector<KernelTensorPtr> LiteTensorToKernelTensorPtrVec(const std::vector<lite::Tensor *> &lite_tensors);
-  KernelTensorPtr LiteTensorToKernelTensorPtr(lite::Tensor *lite_tensor);
-  std::vector<AddressPtr> LiteTensorToAddressPtrVec(const std::vector<lite::Tensor *> &lite_tensors);
-  AddressPtr LiteTensorToAddressPtr(lite::Tensor *lite_tensor);
-
  private:
   std::shared_ptr<KernelMod> kernel_mod_ = nullptr;
   BaseOperatorPtr base_operator_ = nullptr;
diff --git a/mindspore/lite/src/extendrt/utils/tensor_utils.cc b/mindspore/lite/src/extendrt/utils/tensor_utils.cc
index 40a2806401a..71c86df47e0 100644
--- a/mindspore/lite/src/extendrt/utils/tensor_utils.cc
+++ b/mindspore/lite/src/extendrt/utils/tensor_utils.cc
@@ -152,4 +152,55 @@ std::vector<mindspore::tensor::Tensor> TensorUtils::TensorPtrToTensor(
                  [](mindspore::tensor::TensorPtr tensor_ptr) { return mindspore::tensor::Tensor(*tensor_ptr); });
   return tensors;
 }
+
+kernel::AddressPtr CloudTensorUtils::LiteTensorToAddressPtr(const lite::Tensor *lite_tensor) {
+  kernel::AddressPtr address_ptr = std::make_shared<kernel::Address>(lite_tensor->data(), lite_tensor->Size());
+  return address_ptr;
+}
+
+std::vector<kernel::AddressPtr> CloudTensorUtils::LiteTensorToAddressPtrVec(
+  const std::vector<lite::Tensor *> &lite_tensors) {
+  kernel::AddressPtrList address_list;
+
+  for (auto lite_tensor : lite_tensors) {
+    kernel::AddressPtr address = LiteTensorToAddressPtr(lite_tensor);
+    address_list.push_back(address);
+  }
+
+  return address_list;
+}
+
+kernel::KernelTensorPtr CloudTensorUtils::LiteTensorToKernelTensorPtr(const lite::Tensor *lite_tensor) {
+  kernel::AddressPtr address = LiteTensorToAddressPtr(lite_tensor);
+  kernel::KernelTensorPtr kernel_tensor_ptr = std::make_shared<kernel::KernelTensor>();
+  kernel_tensor_ptr->SetData(address);
+  kernel_tensor_ptr->SetFormat(lite_tensor->format());
+
+  auto lite_shape = lite_tensor->shape();
+  std::vector<int64_t> shape;
+  for (size_t i = 0; i < lite_shape.size(); i++) {
+    shape.push_back(lite_shape[i]);
+  }
+
+  auto kernel_tensor_abstract_ptr = std::make_shared<mindspore::abstract::AbstractTensor>(
+    mindspore::TypeIdToType(lite_tensor->data_type()), std::make_shared<mindspore::abstract::Shape>(shape));
+  kernel::TensorInfo info;
+  info.format = lite_tensor->format();
+  info.base_ = kernel_tensor_abstract_ptr;
+
+  kernel_tensor_ptr->SetTensorInfo(info);
+  return kernel_tensor_ptr;
+}
+
+std::vector<kernel::KernelTensorPtr> CloudTensorUtils::LiteTensorToKernelTensorPtrVec(
+  const std::vector<lite::Tensor *> &lite_tensors) {
+  std::vector<kernel::KernelTensorPtr> kernel_tensor_list;
+
+  for (auto lite_tensor : lite_tensors) {
+    auto kernel_tensor_ptr = LiteTensorToKernelTensorPtr(lite_tensor);
+    kernel_tensor_list.push_back(kernel_tensor_ptr);
+  }
+
+  return kernel_tensor_list;
+}
 }  // namespace mindspore
diff --git a/mindspore/lite/src/extendrt/utils/tensor_utils.h b/mindspore/lite/src/extendrt/utils/tensor_utils.h
index 7c6f39dabc9..83c06131f34 100644
--- a/mindspore/lite/src/extendrt/utils/tensor_utils.h
+++ b/mindspore/lite/src/extendrt/utils/tensor_utils.h
@@ -30,6 +30,8 @@
 #include "common/utils.h"
 #include "common/mutable_tensor_impl.h"
 #include "mindspore/core/ir/tensor.h"
+#include "kernel/kernel.h"
+#include "src/tensor.h"
 
 namespace mindspore {
 class TensorRefData : public tensor::TensorData {
@@ -186,6 +188,19 @@ class TensorUtils {
   static std::vector<mindspore::tensor::Tensor> TensorPtrToTensor(
     const std::vector<mindspore::tensor::TensorPtr> &tensor_ptrs);
 };
+
+class CloudTensorUtils {
+ public:
+  /* lite tensor ---> Address */
+  static kernel::AddressPtr LiteTensorToAddressPtr(const lite::Tensor *lite_tensor);
+  static std::vector<kernel::AddressPtr> LiteTensorToAddressPtrVec(
+    const std::vector<lite::Tensor *> &lite_tensors);
+
+  /* lite tensor ---> kernel tensor */
+  static kernel::KernelTensorPtr LiteTensorToKernelTensorPtr(const lite::Tensor *lite_tensor);
+  static std::vector<kernel::KernelTensorPtr> LiteTensorToKernelTensorPtrVec(
+    const std::vector<lite::Tensor *> &lite_tensors);
+};
 }  // namespace mindspore
 
 #endif  // MINDSPORE_LITE_SRC_EXTENDRT_UTILS_TENSOR_UTILS_H_
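Example (not part of the patch): how the CloudTensorUtils helpers are meant to be called; `tensor` is a placeholder for any allocated lite::Tensor, and the wrapper name is hypothetical.

// C++ sketch: both conversions are views over the lite tensor's existing buffer.
#include "src/extendrt/utils/tensor_utils.h"

void ConvertForKernelMod(const mindspore::lite::Tensor *tensor) {
  // Address carries only the raw data pointer and byte size (used by Launch).
  auto address = mindspore::CloudTensorUtils::LiteTensorToAddressPtr(tensor);
  // KernelTensor additionally records format, dtype and shape (used by Init/Resize).
  auto kernel_tensor = mindspore::CloudTensorUtils::LiteTensorToKernelTensorPtr(tensor);
  (void)address;
  (void)kernel_tensor;
}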
diff --git a/mindspore/lite/src/litert/kernel/cpu/fp32_grad/resize_grad.cc b/mindspore/lite/src/litert/kernel/cpu/fp32_grad/resize_grad.cc
index 89d8b4b0e9e..e49b6ab2390 100644
--- a/mindspore/lite/src/litert/kernel/cpu/fp32_grad/resize_grad.cc
+++ b/mindspore/lite/src/litert/kernel/cpu/fp32_grad/resize_grad.cc
@@ -21,6 +21,7 @@
 #include "schema/model_generated.h"
 #include "src/litert/kernel_registry.h"
 #include "include/errorcode.h"
+#include "nnacl/nnacl_common.h"
 
 using mindspore::kernel::KERNEL_ARCH;
 using mindspore::lite::KernelRegistrar;
@@ -29,9 +30,9 @@ using mindspore::lite::RET_OK;
 using mindspore::schema::PrimitiveType_ResizeGrad;
 
 namespace mindspore::kernel {
-float Scaling(size_t in_size, size_t out_size, bool align_corners) {
-  return (align_corners && out_size > 1) ? (in_size - 1) / static_cast<float>(out_size - 1)
-                                         : in_size / static_cast<float>(out_size);
+float ResizeGradCPUKernel::Scaling(size_t in_size, size_t out_size, bool align_corners) {
+  return (align_corners && out_size > 1) ? (in_size - 1) / (static_cast<float>(out_size - 1))
+                                         : in_size / (static_cast<float>(out_size));
 }
 
 int ResizeGradCPUKernel::ReSize() {
diff --git a/mindspore/lite/src/litert/kernel/cpu/fp32_grad/resize_grad.h b/mindspore/lite/src/litert/kernel/cpu/fp32_grad/resize_grad.h
index 802d8c9fa60..9a6ea8e4bd1 100644
--- a/mindspore/lite/src/litert/kernel/cpu/fp32_grad/resize_grad.h
+++ b/mindspore/lite/src/litert/kernel/cpu/fp32_grad/resize_grad.h
@@ -32,6 +32,9 @@ class ResizeGradCPUKernel : public LiteKernel {
   int Run() override;
   int ExecuteInit(int task_id);
   int DoExecute(int task_id);
+
+ private:
+  float Scaling(size_t in_size, size_t out_size, bool align_corners);
 };
 }  // namespace mindspore::kernel
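Example (not part of the patch): a quick numeric check of the scaling formula above, restated as standalone C++; the ScalingRef name exists only for this sketch.

#include <cassert>
#include <cstddef>

// Mirrors ResizeGradCPUKernel::Scaling: with align_corners the first and last
// pixels of input and output coincide, so the step is (in - 1) / (out - 1);
// otherwise it is the plain size ratio in / out.
static float ScalingRef(size_t in_size, size_t out_size, bool align_corners) {
  return (align_corners && out_size > 1) ? (in_size - 1) / static_cast<float>(out_size - 1)
                                         : in_size / static_cast<float>(out_size);
}

int main() {
  assert(ScalingRef(5, 3, true) == 2.0f);   // (5 - 1) / (3 - 1)
  assert(ScalingRef(4, 2, false) == 2.0f);  // 4 / 2
  return 0;
}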
diff --git a/mindspore/lite/src/litert/lite_kernel.cc b/mindspore/lite/src/litert/lite_kernel.cc
index b696d1c7cd7..a469bffebe7 100644
--- a/mindspore/lite/src/litert/lite_kernel.cc
+++ b/mindspore/lite/src/litert/lite_kernel.cc
@@ -108,7 +108,8 @@ int LiteKernel::Execute() {
     return ret;
   }
 
-  if (op_parameter_->is_zero_shape_ == false) {
+  /* op_parameter_ is nullptr: the kernel runs through kernel mod */
+  if (op_parameter_ == nullptr || op_parameter_->is_zero_shape_ == false) {
     ret = Run();
     if (lite::RET_OK != ret) {
       MS_LOG(ERROR) << "run kernel failed, name: " << this->name();
diff --git a/mindspore/lite/test/st/scripts/cloud_infer/run_benchmark_cloud_ascend.sh b/mindspore/lite/test/st/scripts/cloud_infer/run_benchmark_cloud_ascend.sh
index 579b77df6a3..9ec1f5fe721 100644
--- a/mindspore/lite/test/st/scripts/cloud_infer/run_benchmark_cloud_ascend.sh
+++ b/mindspore/lite/test/st/scripts/cloud_infer/run_benchmark_cloud_ascend.sh
@@ -38,6 +38,7 @@ function Run_Benchmark() {
     cp tools/benchmark/benchmark ./ || exit 1
     export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:./runtime/lib
    export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:./tools/converter/lib/:./runtime/third_party/glog:./runtime/third_party/libjpeg-turbo/lib
+    export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:./runtime/third_party/dnnl
 
     local line_info model_info spec_acc_limit model_name input_num input_shapes \
             mode model_file input_files output_file data_path acc_limit enableFp16 \
diff --git a/mindspore/lite/test/st/scripts/cloud_infer/run_benchmark_x86_cloud_cpu.sh b/mindspore/lite/test/st/scripts/cloud_infer/run_benchmark_x86_cloud_cpu.sh
index ce09d410dd5..e09ae67a910 100644
--- a/mindspore/lite/test/st/scripts/cloud_infer/run_benchmark_x86_cloud_cpu.sh
+++ b/mindspore/lite/test/st/scripts/cloud_infer/run_benchmark_x86_cloud_cpu.sh
@@ -9,8 +9,9 @@ function Run_x86() {
     # $1:framework;
     echo 'cd '${x86_path}'/mindspore-lite-'${version}'-linux-*' >> "${run_x86_log_file}"
     cd ${x86_path}/mindspore-lite-${version}-linux-*/ || exit 1
-    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./runtime/lib:./runtime/third_party/glog
-    export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:./tools/converter/lib/:./runtime/third_party/glog
+    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./runtime/lib:./tools/converter/lib/
+    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./runtime/third_party/glog
+    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./runtime/third_party/dnnl
     cp tools/benchmark/benchmark ./ || exit 1
     # Run converted models:
     # $1:cfgFileList; $2:modelPath; $3:dataPath; $4:logFile; $5:resultFile; $6:platform; $7:processor; $8:phoneId;
diff --git a/mindspore/lite/test/st/scripts/run_benchmark_server_inference_tensorrt_cloud.sh b/mindspore/lite/test/st/scripts/run_benchmark_server_inference_tensorrt_cloud.sh
index 4cec25375a2..d1387221e07 100644
--- a/mindspore/lite/test/st/scripts/run_benchmark_server_inference_tensorrt_cloud.sh
+++ b/mindspore/lite/test/st/scripts/run_benchmark_server_inference_tensorrt_cloud.sh
@@ -33,8 +33,9 @@ function Run_TensorRT() {
     # cd ${tensorrt_path}/mindspore-lite-${version}-linux-x64/ || exit 1
     echo 'cd '${x86_path}'/mindspore-lite-'${version}'-linux-*'
     cd ${x86_path}/mindspore-lite-${version}-linux-*/ || exit 1
-    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./runtime/lib:./runtime/third_party/glog
-    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./tools/converter/lib/:./runtime/third_party/glog
+    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./runtime/lib:./tools/converter/lib/
+    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./runtime/third_party/glog
+    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./runtime/third_party/dnnl
     cp tools/benchmark/benchmark ./ || exit 1
 
     local line_info model_info spec_acc_limit model_name input_num input_shapes \
diff --git a/mindspore/lite/test/st/scripts/run_benchmark_x86_cloud.sh b/mindspore/lite/test/st/scripts/run_benchmark_x86_cloud.sh
index 24caa06b2a7..b75b55075b0 100644
--- a/mindspore/lite/test/st/scripts/run_benchmark_x86_cloud.sh
+++ b/mindspore/lite/test/st/scripts/run_benchmark_x86_cloud.sh
@@ -11,8 +11,9 @@ function Run_x86_java() {
     tar -zxf mindspore-lite-${version}-linux-x64.tar.gz || exit 1
     # compile benchmark
     cd mindspore-lite-${version}-linux-x64 || exit 1
-    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./runtime/lib:./runtime/third_party/glog
-    export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:./tools/converter/lib/:./runtime/third_party/glog
+    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./runtime/lib:./tools/converter/lib/
+    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./runtime/third_party/glog
+    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./runtime/third_party/dnnl
     echo "javac -cp ${x86_path}/java/mindspore-lite-${version}-linux-x64/runtime/lib/mindspore-lite-java.jar ${basepath}/java/src/main/java/Benchmark.java -d ."
     javac -cp ${x86_path}/java/mindspore-lite-${version}-linux-x64/runtime/lib/mindspore-lite-java.jar ${basepath}/java/src/main/java/Benchmark.java -d .
@@ -60,8 +61,9 @@ function Run_x86() {
     # $1:framework;
     echo 'cd '${x86_path}'/mindspore-lite-'${version}'-linux-*' >> "${run_x86_log_file}"
     cd ${x86_path}/mindspore-lite-${version}-linux-*/ || exit 1
-    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./runtime/lib:./runtime/third_party/glog
-    export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:./tools/converter/lib/:./runtime/third_party/glog
+    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./runtime/lib:./tools/converter/lib/
+    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./runtime/third_party/glog
+    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./runtime/third_party/dnnl
     cp tools/benchmark/benchmark ./ || exit 1
     # Run converted models:
     # $1:cfgFileList; $2:modelPath; $3:dataPath; $4:logFile; $5:resultFile; $6:platform; $7:processor; $8:phoneId;
diff --git a/mindspore/lite/tools/converter/CMakeLists.txt b/mindspore/lite/tools/converter/CMakeLists.txt
index f2b7099dac9..5bbf019b646 100644
--- a/mindspore/lite/tools/converter/CMakeLists.txt
+++ b/mindspore/lite/tools/converter/CMakeLists.txt
@@ -16,6 +16,33 @@ set(CCSRC_SRC
     ${CCSRC_DIR}/kernel/kernel_factory.cc
     )
 
+if(MSLITE_ENABLE_CLOUD_FUSION_INFERENCE OR MSLITE_ENABLE_CLOUD_INFERENCE)
+    set(CCSRC_SRC ${CCSRC_SRC}
+        ${CCSRC_DIR}/ps/ps_context.cc
+        ${CCSRC_DIR}/common/thread_pool.cc
+        ${CCSRC_DIR}/plugin/device/cpu/kernel/cpu_kernel.cc
+        ${CCSRC_DIR}/distributed/cluster/dummy_cluster_context.cc
+        ${CCSRC_DIR}/kernel/common_utils.cc
+        ${CCSRC_DIR}/kernel/kash/kernel_pack.cc
+        ${CCSRC_DIR}/kernel/kernel_build_info.cc
+        ${CCSRC_DIR}/kernel/oplib/oplib.cc
+        ${CCSRC_DIR}/kernel/kernel.cc
+        ${CCSRC_DIR}/kernel/oplib/super_bar.cc
+        ${CCSRC_DIR}/runtime/device/kernel_info.cc
+        ${CCSRC_DIR}/runtime/graph_scheduler/actor/actor_common.cc
+        ${CCSRC_DIR}/runtime/device/ms_device_shape_transfer.cc
+        ${CCSRC_DIR}/runtime/hardware/device_type.cc
+        ${CCSRC_DIR}/runtime/device/kernel_runtime_manager.cc
+        ${CCSRC_DIR}/runtime/hardware/device_context_manager.cc
+        ${CCSRC_DIR}/runtime/device/convert_tensor_utils.cc
+        ${CCSRC_DIR}/backend/common/session/exec_order_builder.cc
+        ${CCSRC_DIR}/backend/common/session/kernel_graph.cc
+        ${CCSRC_DIR}/backend/common/session/anf_runtime_algorithm.cc
+        ${SRC_DIR}/extendrt/lite_kernel_mod.cc
+        ${SRC_DIR}/extendrt/utils/tensor_utils.cc
+    )
+endif()
+
 if(NOT WIN32)
     set(CCSRC_SRC ${CCSRC_SRC}
         ${CCSRC_DIR}/utils/anfalgo.cc
diff --git a/mindspore/lite/tools/optimizer/common/helper.cc b/mindspore/lite/tools/optimizer/common/helper.cc
index bc3b9ba0cea..cd9adf3ce06 100644
--- a/mindspore/lite/tools/optimizer/common/helper.cc
+++ b/mindspore/lite/tools/optimizer/common/helper.cc
@@ -153,6 +153,12 @@ CNodePtr NewCNode(const CNodePtr &cnode, const KernelGraphPtr &fg, const std::ve
   return nullptr;
 }
 
+// Not implemented for lite; provided only to keep the API compatible.
+AbstractBasePtr CppInferShapeAndType(const PrimitivePtr &prim, const AbstractBasePtrList &args_spec_list) {
+  MS_LOG(DEBUG) << "Not implemented for lite; provided only to keep the API compatible.";
+  return nullptr;
+}
+
 std::shared_ptr<std::vector<std::pair<AnfNodePtr, int>>> GetRealNodeUsedList(const FuncGraphPtr &graph,
                                                                              const AnfNodePtr &node) {
   return Helper::GetRealNodeUsedList(graph, node);
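Example (not part of the patch): since CppInferShapeAndType is a stub that always returns nullptr on lite builds, callers must treat its result as optional. The header path, the mindspore::opt namespace, and the TryCppInfer wrapper name are assumptions for illustration.

// C++ sketch of a caller-side guard around the compatibility stub.
#include "tools/optimizer/common/helper.h"

mindspore::AbstractBasePtr TryCppInfer(const mindspore::PrimitivePtr &prim,
                                       const mindspore::AbstractBasePtrList &args) {
  auto abs = mindspore::opt::CppInferShapeAndType(prim, args);
  if (abs == nullptr) {
    // Expected on lite builds: the stub exists only for API compatibility,
    // so callers fall back to the existing lite shape-infer path.
  }
  return abs;
}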