[LITE] litekernel support kernelmod

This commit is contained in:
ling 2023-01-31 10:42:02 +08:00
parent fcabc30952
commit 4a7571ad45
17 changed files with 275 additions and 76 deletions

View File

@@ -845,8 +845,6 @@ else()
DESTINATION ${RUNTIME_LIB_DIR} COMPONENT ${RUNTIME_COMPONENT_NAME})
install(FILES ${glog_LIBPATH}/libmindspore_glog.so.0.4.0 DESTINATION ${RUNTIME_LIB_DIR}
RENAME libmindspore_glog.so.0 COMPONENT ${RUNTIME_COMPONENT_NAME})
install(FILES ${onednn_LIBPATH}/libdnnl.so.2.2 DESTINATION ${RUNTIME_LIB_DIR}
RENAME libdnnl.so.2 COMPONENT ${RUNTIME_COMPONENT_NAME})
install(FILES ${onednn_LIBPATH}/libdnnl.so.2.2 DESTINATION ${DNNL_DIR}
RENAME libdnnl.so.2 COMPONENT ${RUNTIME_COMPONENT_NAME})
install(TARGETS mindspore_core DESTINATION ${RUNTIME_LIB_DIR} COMPONENT ${RUNTIME_COMPONENT_NAME})

View File

@@ -27,6 +27,7 @@ if(MSLITE_ENABLE_CLOUD_FUSION_INFERENCE OR MSLITE_ENABLE_CLOUD_INFERENCE)
${CMAKE_CURRENT_SOURCE_DIR}/../common/graph_util.cc
${CMAKE_CURRENT_SOURCE_DIR}/../common/config_infos.cc
${CMAKE_CURRENT_SOURCE_DIR}/../common/config_file.cc
${CMAKE_CURRENT_SOURCE_DIR}/lite_kernel_mod.cc
${CMAKE_CURRENT_SOURCE_DIR}/subgraph_kernel.cc
${CMAKE_CURRENT_SOURCE_DIR}/numa_adapter.cc
${CMAKE_CURRENT_SOURCE_DIR}/kernel/cpu/less_test_kernel_mod.cc

View File

@@ -0,0 +1,99 @@
/**
* Copyright 2023 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/extendrt/lite_kernel_mod.h"
#include <string>
#include "plugin/factory/ms_factory.h"
#include "plugin/device/cpu/kernel/cpu_kernel.h"
#include "src/extendrt/utils/tensor_utils.h"
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
namespace mindspore::kernel {
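// Prepare is deferred until shape inference has run; it then converts the lite
// tensors to kernel tensors and initializes the wrapped KernelMod (ReSize on success).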
int LiteKernelMod::Prepare() {
if (!InferShapeDone()) {
return RET_OK;
}
auto inputs = CloudTensorUtils::LiteTensorToKernelTensorPtrVec(in_tensors_);
auto outputs = CloudTensorUtils::LiteTensorToKernelTensorPtrVec(out_tensors_);
bool ret = kernel_mod_->Init(this->base_operator_, inputs, outputs);
return ret ? ReSize() : RET_ERROR;
}
int LiteKernelMod::ReSize() {
auto inputs = CloudTensorUtils::LiteTensorToKernelTensorPtrVec(in_tensors_);
auto outputs = CloudTensorUtils::LiteTensorToKernelTensorPtrVec(out_tensors_);
return kernel_mod_->Resize(base_operator_, inputs, outputs);
}
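// Run wraps the tensor buffers as Address lists, allocates the KernelMod's
// workspace, launches the kernel, and frees the workspace afterwards.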
int LiteKernelMod::Run() {
auto inputs = CloudTensorUtils::LiteTensorToAddressPtrVec(in_tensors_);
auto outputs = CloudTensorUtils::LiteTensorToAddressPtrVec(out_tensors_);
AddressPtrList workspace;
auto workspace_size = kernel_mod_->GetWorkspaceSizeList();
for (size_t i = 0; i < workspace_size.size(); i++) {
auto buffer = ms_context_->allocator->Malloc(workspace_size.at(i));
std::shared_ptr<Address> address = std::make_shared<Address>(buffer, workspace_size.at(i));
workspace.push_back(address);
}
auto ret = kernel_mod_->Launch(inputs, workspace, outputs, nullptr);
for (auto address : workspace) {
ms_context_->allocator->Free(address->addr);
}
return ret ? RET_OK : RET_ERROR;
}
std::shared_ptr<LiteKernelMod> LiteKernelModRegistry(BaseOperatorPtr base_operator,
std::vector<lite::Tensor *> in_tensors,
std::vector<lite::Tensor *> out_tensors,
const lite::InnerContext *ctx) {
std::string op_type = base_operator->name();
std::shared_ptr<kernel::KernelMod> kernel_mod =
mindspore::kernel::Factory<kernel::NativeCpuKernelMod>::Instance().Create(op_type);
if (kernel_mod == nullptr) {
MS_LOG(ERROR) << "Create kernel mod failed. kernel: " << op_type;
return nullptr;
}
auto lite_kernel_mod =
std::make_shared<mindspore::kernel::LiteKernelMod>(kernel_mod, base_operator, in_tensors, out_tensors, ctx);
return lite_kernel_mod;
}
kernel::KernelExec *FindKernelMod(BaseOperatorPtr base_operator, std::vector<lite::Tensor *> in_tensors,
std::vector<lite::Tensor *> out_tensors, const lite::InnerContext *ctx) {
std::shared_ptr<kernel::LiteKernelMod> lite_kernel_mod =
mindspore::kernel::LiteKernelModRegistry(base_operator, in_tensors, out_tensors, ctx);
if (lite_kernel_mod == nullptr) {
MS_LOG(ERROR) << "Create lite kernel mod failed. kernel name: " << base_operator;
return nullptr;
}
kernel::KernelExec *kernel_exec = new kernel::KernelExec(lite_kernel_mod);
auto desc = kernel_exec->desc();
desc.data_type = in_tensors.front()->data_type();
kernel_exec->set_desc(desc);
return kernel_exec;
}
} // namespace mindspore::kernel
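Taken together, this new file is an adapter: a backend KernelMod from the core runtime is wrapped so the lite runtime can schedule it like any other KernelExec. A minimal sketch of the intended call site, assuming a surrounding kernel-selection routine and a TryNativeLiteKernel helper that are not part of this commit:
// Hypothetical fallback during kernel selection (helper names are assumptions):
kernel::KernelExec *SelectKernel(const BaseOperatorPtr &op, const std::vector<lite::Tensor *> &inputs,
                                 const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) {
  // Prefer a native lite kernel when one is registered for this op type.
  kernel::KernelExec *native = TryNativeLiteKernel(op, inputs, outputs, ctx);  // assumed helper
  if (native != nullptr) {
    return native;
  }
  // Otherwise fall back to a core KernelMod wrapped in a LiteKernelMod.
  return kernel::FindKernelMod(op, inputs, outputs, ctx);
}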

View File

@@ -0,0 +1,47 @@
/**
* Copyright 2023 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_LITERT_LITE_KERNEL_MOD_H_
#define MINDSPORE_LITE_SRC_LITERT_LITE_KERNEL_MOD_H_
#include <memory>
#include <vector>
#include "src/litert/lite_kernel.h"
#include "src/litert/kernel_exec.h"
#include "kernel/kernel.h"
#include "include/model.h"
namespace mindspore::kernel {
class LiteKernelMod : public LiteKernel {
public:
explicit LiteKernelMod(std::shared_ptr<mindspore::kernel::KernelMod> kernel_mod,
kernel::BaseOperatorPtr base_operator, std::vector<lite::Tensor *> in_tensors,
std::vector<lite::Tensor *> out_tensors, const lite::InnerContext *ctx)
: LiteKernel(nullptr, in_tensors, out_tensors, ctx), kernel_mod_(kernel_mod), base_operator_(base_operator) {}
~LiteKernelMod() override = default;
int Prepare() override;
int ReSize() override;
int Run() override;
private:
KernelModPtr kernel_mod_;
BaseOperatorPtr base_operator_;
};
kernel::KernelExec *FindKernelMod(BaseOperatorPtr base_operator, std::vector<lite::Tensor *> in_tensors,
std::vector<lite::Tensor *> out_tensors, const lite::InnerContext *ctx);
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_LITERT_LITE_KERNEL_MOD_H_

View File

@@ -22,16 +22,16 @@
namespace mindspore::kernel {
int InnerKernel::Prepare() {
auto inputs = LiteTensorToKernelTensorPtrVec(this->in_tensors_);
auto outputs = LiteTensorToKernelTensorPtrVec(this->out_tensors_);
auto inputs = CloudTensorUtils::LiteTensorToKernelTensorPtrVec(this->in_tensors_);
auto outputs = CloudTensorUtils::LiteTensorToKernelTensorPtrVec(this->out_tensors_);
return this->kernel_mod_->Init(this->base_operator_, inputs, outputs) ? mindspore::lite::RET_OK
: mindspore::lite::RET_ERROR;
}
int InnerKernel::Execute() {
auto inputs = LiteTensorToAddressPtrVec(this->in_tensors_);
auto outputs = LiteTensorToAddressPtrVec(this->out_tensors_);
auto inputs = CloudTensorUtils::LiteTensorToAddressPtrVec(this->in_tensors_);
auto outputs = CloudTensorUtils::LiteTensorToAddressPtrVec(this->out_tensors_);
std::vector<AddressPtr> workspace;
@@ -41,60 +41,10 @@ int InnerKernel::Execute() {
int InnerKernel::ReSize() {
// use InitOp instead
auto inputs = LiteTensorToKernelTensorPtrVec(this->in_tensors_);
auto outputs = LiteTensorToKernelTensorPtrVec(this->out_tensors_);
auto inputs = CloudTensorUtils::LiteTensorToKernelTensorPtrVec(this->in_tensors_);
auto outputs = CloudTensorUtils::LiteTensorToKernelTensorPtrVec(this->out_tensors_);
return this->kernel_mod_->Init(this->base_operator_, inputs, outputs) ? mindspore::lite::RET_OK
: mindspore::lite::RET_ERROR;
}
std::vector<KernelTensorPtr> InnerKernel::LiteTensorToKernelTensorPtrVec(
const std::vector<lite::Tensor *> &lite_tensors) {
std::vector<KernelTensorPtr> ret_vec;
for (auto lite_tensor : lite_tensors) {
auto kernel_tensor_ptr = LiteTensorToKernelTensorPtr(lite_tensor);
ret_vec.push_back(kernel_tensor_ptr);
}
return ret_vec;
}
KernelTensorPtr InnerKernel::LiteTensorToKernelTensorPtr(lite::Tensor *lite_tensor) {
KernelTensorPtr kernel_tensor_ptr = std::make_shared<mindspore::kernel::KernelTensor>();
auto address_ptr = LiteTensorToAddressPtr(lite_tensor);
kernel_tensor_ptr->SetData(address_ptr);
kernel_tensor_ptr->SetFormat(lite_tensor->format());
auto lite_shape = lite_tensor->shape();
std::vector<int64_t> shape;
for (size_t i = 0; i < lite_shape.size(); i++) {
shape.push_back(lite_shape[i]);
}
auto kernel_tensor_abstract_ptr = std::make_shared<mindspore::abstract::AbstractTensor>(
mindspore::TypeIdToType(lite_tensor->data_type()), std::make_shared<abstract::Shape>(shape));
kernel::TensorInfo info;
info.base_ = kernel_tensor_abstract_ptr;
kernel_tensor_ptr->SetTensorInfo(info);
return kernel_tensor_ptr;
}
std::vector<AddressPtr> InnerKernel::LiteTensorToAddressPtrVec(const std::vector<lite::Tensor *> &lite_tensors) {
std::vector<AddressPtr> ret_vec;
for (auto lite_tensor : lite_tensors) {
auto address_ptr = LiteTensorToAddressPtr(lite_tensor);
ret_vec.push_back(address_ptr);
}
return ret_vec;
}
AddressPtr InnerKernel::LiteTensorToAddressPtr(lite::Tensor *lite_tensor) {
AddressPtr address_ptr = std::make_shared<mindspore::kernel::Address>();
address_ptr->addr = lite_tensor->data();
address_ptr->size = lite_tensor->Size();
return address_ptr;
}
} // namespace mindspore::kernel

View File

@@ -28,6 +28,7 @@
// #include "include/api/context.h"
#include "kernel/kernel.h"
#include "extendrt/mindir_loader/abstract_kernel.h"
#include "src/extendrt/utils/tensor_utils.h"
using mindspore::infer::Abstractkernel;
@@ -89,12 +90,6 @@ class InnerKernel : public Abstractkernel {
const std::vector<lite::Tensor *> &out_tensors() const override { return out_tensors_; }
private:
std::vector<KernelTensorPtr> LiteTensorToKernelTensorPtrVec(const std::vector<lite::Tensor *> &lite_tensors);
KernelTensorPtr LiteTensorToKernelTensorPtr(lite::Tensor *lite_tensor);
std::vector<AddressPtr> LiteTensorToAddressPtrVec(const std::vector<lite::Tensor *> &lite_tensors);
AddressPtr LiteTensorToAddressPtr(lite::Tensor *lite_tensor);
private:
std::shared_ptr<mindspore::kernel::KernelMod> kernel_mod_ = nullptr;
BaseOperatorPtr base_operator_ = nullptr;

View File

@@ -152,4 +152,55 @@ std::vector<mindspore::tensor::Tensor> TensorUtils::TensorPtrToTensor(
[](mindspore::tensor::TensorPtr tensor_ptr) { return mindspore::tensor::Tensor(*tensor_ptr); });
return tensors;
}
kernel::AddressPtr CloudTensorUtils::LiteTensorToAddressPtr(const lite::Tensor *lite_tensor) {
kernel::AddressPtr address_ptr = std::make_shared<kernel::Address>(lite_tensor->data(), lite_tensor->Size());
return address_ptr;
}
std::vector<mindspore::kernel::AddressPtr> CloudTensorUtils::LiteTensorToAddressPtrVec(
const std::vector<lite::Tensor *> &lite_tensors) {
kernel::AddressPtrList address_list;
for (auto lite_tensor : lite_tensors) {
kernel::AddressPtr address = LiteTensorToAddressPtr(lite_tensor);
address_list.push_back(address);
}
return address_list;
}
kernel::KernelTensorPtr CloudTensorUtils::LiteTensorToKernelTensorPtr(const lite::Tensor *lite_tensor) {
kernel::AddressPtr address = LiteTensorToAddressPtr(lite_tensor);
kernel::KernelTensorPtr kernel_tensor_ptr = std::make_shared<kernel::KernelTensor>();
kernel_tensor_ptr->SetData(address);
kernel_tensor_ptr->SetFormat(lite_tensor->format());
auto lite_shape = lite_tensor->shape();
std::vector<int64_t> shape;
for (size_t i = 0; i < lite_shape.size(); i++) {
shape.push_back(lite_shape[i]);
}
auto kernel_tensor_abstract_ptr = std::make_shared<mindspore::abstract::AbstractTensor>(
mindspore::TypeIdToType(lite_tensor->data_type()), std::make_shared<abstract::Shape>(shape));
kernel::TensorInfo info;
info.format = lite_tensor->format();
info.base_ = kernel_tensor_abstract_ptr;
kernel_tensor_ptr->SetTensorInfo(info);
return kernel_tensor_ptr;
}
std::vector<kernel::KernelTensorPtr> CloudTensorUtils::LiteTensorToKernelTensorPtrVec(
const std::vector<lite::Tensor *> &lite_tensors) {
std::vector<kernel::KernelTensorPtr> kernel_tensor_list;
for (auto lite_tensor : lite_tensors) {
auto kernel_tensor_ptr = LiteTensorToKernelTensorPtr(lite_tensor);
kernel_tensor_list.push_back(kernel_tensor_ptr);
}
return kernel_tensor_list;
}
} // namespace mindspore
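Note that LiteTensorToAddressPtr aliases the lite tensor's existing buffer rather than copying it, so the lite tensor must outlive any Address or KernelTensor built from it. A minimal sketch of this contract (the lite::Tensor construction arguments are assumptions, shown only for illustration):
// Illustrative fragment; exact lite::Tensor constructor arguments are assumptions.
lite::Tensor tensor(kNumberTypeFloat32, {1, 2, 2});
tensor.MallocData();
auto address = mindspore::CloudTensorUtils::LiteTensorToAddressPtr(&tensor);
// address->addr points into tensor's buffer and address->size == tensor.Size();
// no copy is made, so freeing the tensor's data invalidates the Address.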

View File

@@ -30,6 +30,8 @@
#include "common/utils.h"
#include "common/mutable_tensor_impl.h"
#include "mindspore/core/ir/tensor.h"
#include "kernel/kernel.h"
#include "src/tensor.h"
namespace mindspore {
class TensorRefData : public tensor::TensorData {
@@ -186,6 +188,19 @@ class TensorUtils {
static std::vector<mindspore::tensor::Tensor> TensorPtrToTensor(
const std::vector<mindspore::tensor::TensorPtr> &tensor_ptrs);
};
class CloudTensorUtils {
public:
/* lite tensor ---> Address */
static kernel::AddressPtr LiteTensorToAddressPtr(const lite::Tensor *lite_tensor);
static std::vector<mindspore::kernel::AddressPtr> LiteTensorToAddressPtrVec(
const std::vector<lite::Tensor *> &lite_tensors);
/* lite tensor ---> kernel tensor */
static kernel::KernelTensorPtr LiteTensorToKernelTensorPtr(const lite::Tensor *lite_tensor);
static std::vector<kernel::KernelTensorPtr> LiteTensorToKernelTensorPtrVec(
const std::vector<lite::Tensor *> &lite_tensors);
};
} // namespace mindspore
#endif // MINDSPORE_LITE_SRC_EXTENDRT_UTILS_TENSOR_UTILS_H_

View File

@@ -21,6 +21,7 @@
#include "schema/model_generated.h"
#include "src/litert/kernel_registry.h"
#include "include/errorcode.h"
#include "nnacl/nnacl_common.h"
using mindspore::kernel::KERNEL_ARCH;
using mindspore::lite::KernelRegistrar;
@@ -29,9 +30,9 @@ using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_ResizeGrad;
namespace mindspore::kernel {
float Scaling(size_t in_size, size_t out_size, bool align_corners) {
return (align_corners && out_size > 1) ? (in_size - 1) / static_cast<float>(out_size - 1)
: in_size / static_cast<float>(out_size);
float ResizeGradCPUKernel::Scaling(size_t in_size, size_t out_size, bool align_corners) {
return (align_corners && out_size > 1) ? (in_size - 1) / (static_cast<float>(out_size - 1))
: in_size / (static_cast<float>(out_size));
}
int ResizeGradCPUKernel::ReSize() {

View File

@@ -32,6 +32,9 @@ class ResizeGradCPUKernel : public LiteKernel {
int Run() override;
int ExecuteInit(int task_id);
int DoExecute(int task_id);
private:
float Scaling(size_t in_size, size_t out_size, bool align_corners);
};
} // namespace mindspore::kernel
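For reference, a worked example of the scale factor (values chosen for illustration): with align_corners the corner samples are pinned, so only the interior spacing is rescaled.
// Upsampling from in_size = 4 to out_size = 8:
//   align_corners = false : 4 / 8.0f       = 0.5f
//   align_corners = true  : (4 - 1) / 7.0f ~= 0.4286f  (corner samples pinned)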

View File

@@ -108,7 +108,8 @@ int LiteKernel::Execute() {
return ret;
}
if (op_parameter_->is_zero_shape_ == false) {
/* op_parameter_ is null: the kernel runs through a kernel mod */
if (op_parameter_ == nullptr || op_parameter_->is_zero_shape_ == false) {
ret = Run();
if (lite::RET_OK != ret) {
MS_LOG(ERROR) << "run kernel failed, name: " << this->name();

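The new condition keeps KernelMod-backed kernels runnable: they carry no OpParameter, so a null op_parameter_ must not be treated as "skip". A minimal equivalent of the gating logic (the helper is hypothetical, written out only for clarity):
bool ShouldRun(const OpParameter *op_parameter) {
  if (op_parameter == nullptr) {
    return true;  // KernelMod-backed kernel: no OpParameter, always run
  }
  return !op_parameter->is_zero_shape_;  // skip only zero-shape kernels
}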
View File

@@ -38,6 +38,7 @@ function Run_Benchmark() {
cp tools/benchmark/benchmark ./ || exit 1
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:./runtime/lib
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:./tools/converter/lib/:./runtime/third_party/glog:./runtime/third_party/libjpeg-turbo/lib
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:./runtime/third_party/dnnl
local line_info model_info spec_acc_limit model_name input_num input_shapes \
mode model_file input_files output_file data_path acc_limit enableFp16 \

View File

@@ -9,8 +9,9 @@ function Run_x86() {
# $1:framework;
echo 'cd '${x86_path}'/mindspore-lite-'${version}'-linux-*' >> "${run_x86_log_file}"
cd ${x86_path}/mindspore-lite-${version}-linux-*/ || exit 1
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./runtime/lib:./runtime/third_party/glog
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:./tools/converter/lib/:./runtime/third_party/glog
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./runtime/lib:./tools/converter/lib/
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./runtime/third_party/glog
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./runtime/third_party/dnnl
cp tools/benchmark/benchmark ./ || exit 1
# Run converted models:
# $1:cfgFileList; $2:modelPath; $3:dataPath; $4:logFile; $5:resultFile; $6:platform; $7:processor; $8:phoneId;

View File

@@ -33,8 +33,9 @@ function Run_TensorRT() {
# cd ${tensorrt_path}/mindspore-lite-${version}-linux-x64/ || exit 1
echo 'cd '${x86_path}'/mindspore-lite-'${version}'-linux-*'
cd ${x86_path}/mindspore-lite-${version}-linux-*/ || exit 1
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./runtime/lib:./runtime/third_party/glog
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./tools/converter/lib/:./runtime/third_party/glog
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./runtime/lib:./tools/converter/lib/
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./runtime/third_party/glog
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./runtime/third_party/dnnl
cp tools/benchmark/benchmark ./ || exit 1
local line_info model_info spec_acc_limit model_name input_num input_shapes \

View File

@@ -11,8 +11,9 @@ function Run_x86_java() {
tar -zxf mindspore-lite-${version}-linux-x64.tar.gz || exit 1
# compile benchmark
cd mindspore-lite-${version}-linux-x64 || exit 1
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./runtime/lib:./runtime/third_party/glog
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:./tools/converter/lib/:./runtime/third_party/glog
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./runtime/lib:./tools/converter/lib/
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./runtime/third_party/glog
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./runtime/third_party/dnnl
echo "javac -cp ${x86_path}/java/mindspore-lite-${version}-linux-x64/runtime/lib/mindspore-lite-java.jar ${basepath}/java/src/main/java/Benchmark.java -d ."
javac -cp ${x86_path}/java/mindspore-lite-${version}-linux-x64/runtime/lib/mindspore-lite-java.jar ${basepath}/java/src/main/java/Benchmark.java -d .
@@ -60,8 +61,9 @@ function Run_x86() {
# $1:framework;
echo 'cd '${x86_path}'/mindspore-lite-'${version}'-linux-*' >> "${run_x86_log_file}"
cd ${x86_path}/mindspore-lite-${version}-linux-*/ || exit 1
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./runtime/lib:./runtime/third_party/glog
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:./tools/converter/lib/:./runtime/third_party/glog
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./runtime/lib:./tools/converter/lib/
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./runtime/third_party/glog
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./runtime/third_party/dnnl
cp tools/benchmark/benchmark ./ || exit 1
# Run converted models:
# $1:cfgFileList; $2:modelPath; $3:dataPath; $4:logFile; $5:resultFile; $6:platform; $7:processor; $8:phoneId;

View File

@@ -16,6 +16,33 @@ set(CCSRC_SRC
${CCSRC_DIR}/kernel/kernel_factory.cc
)
if(MSLITE_ENABLE_CLOUD_FUSION_INFERENCE OR MSLITE_ENABLE_CLOUD_INFERENCE)
set(CCSRC_SRC ${CCSRC_SRC}
${CCSRC_DIR}/ps/ps_context.cc
${CCSRC_DIR}/common/thread_pool.cc
${CCSRC_DIR}/plugin/device/cpu/kernel/cpu_kernel.cc
${CCSRC_DIR}/distributed/cluster/dummy_cluster_context.cc
${CCSRC_DIR}/kernel/common_utils.cc
${CCSRC_DIR}/kernel/kash/kernel_pack.cc
${CCSRC_DIR}/kernel/kernel_build_info.cc
${CCSRC_DIR}/kernel/oplib/oplib.cc
${CCSRC_DIR}/kernel/kernel.cc
${CCSRC_DIR}/kernel/oplib/super_bar.cc
${CCSRC_DIR}/runtime/device/kernel_info.cc
${CCSRC_DIR}/runtime/graph_scheduler/actor/actor_common.cc
${CCSRC_DIR}/runtime/device/ms_device_shape_transfer.cc
${CCSRC_DIR}/runtime/hardware/device_type.cc
${CCSRC_DIR}/runtime/device/kernel_runtime_manager.cc
${CCSRC_DIR}/runtime/hardware/device_context_manager.cc
${CCSRC_DIR}/runtime/device/convert_tensor_utils.cc
${CCSRC_DIR}/backend/common/session/exec_order_builder.cc
${CCSRC_DIR}/backend/common/session/kernel_graph.cc
${CCSRC_DIR}/backend/common/session/anf_runtime_algorithm.cc
${SRC_DIR}/extendrt/lite_kernel_mod.cc
${SRC_DIR}/extendrt/utils/tensor_utils.cc
)
endif()
if(NOT WIN32)
set(CCSRC_SRC ${CCSRC_SRC}
${CCSRC_DIR}/utils/anfalgo.cc

View File

@@ -153,6 +153,12 @@ CNodePtr NewCNode(const CNodePtr &cnode, const KernelGraphPtr &fg, const std::ve
return nullptr;
}
// Not implemented for lite; provided only for API compatibility.
AbstractBasePtr CppInferShapeAndType(const PrimitivePtr &prim, const AbstractBasePtrList &args_spec_list) {
MS_LOG(DEBUG) << "Not implemented for lite; provided only for API compatibility.";
return nullptr;
}
std::shared_ptr<std::vector<std::pair<AnfNodePtr, int>>> GetRealNodeUsedList(const FuncGraphPtr &graph,
const AnfNodePtr &node) {
return Helper::GetRealNodeUsedList(graph, node);