Template DelegateModel and fix the C API

sunsuodong 2021-11-23 19:03:24 -08:00
parent dfd71f668b
commit ef80c94aac
20 changed files with 120 additions and 98 deletions

View File

@@ -32,12 +32,14 @@ typedef enum {
} SchemaVersion;
using KernelIter = std::vector<kernel::Kernel *>::iterator;
template <class T>
class MS_API DelegateModel {
public:
/// \brief Constructor of MindSpore Lite DelegateModel.
DelegateModel(std::vector<kernel::Kernel *> *kernels, const std::vector<MSTensor> &inputs,
const std::vector<MSTensor> &outputs,
const std::map<kernel::Kernel *, const schema::Primitive *> &primitives, SchemaVersion version)
const std::vector<MSTensor> &outputs, const std::map<kernel::Kernel *, const T *> &primitives,
SchemaVersion version)
: kernels_(kernels), inputs_(inputs), outputs_(outputs), primitives_(primitives), version_(version) {}
/// \brief Destructor of MindSpore Lite DelegateModel.
@@ -47,18 +49,24 @@ class MS_API DelegateModel {
///
/// \param[in] kernel A kernel in the DelegateModel kernels vector.
///
/// \return The schema::Primitive of The kernel.
const schema::Primitive *GetPrimitive(kernel::Kernel *kernel) const;
/// \return The Primitive of the kernel.
const T *GetPrimitive(kernel::Kernel *kernel) const {
if (primitives_.find(kernel) != primitives_.end()) {
return primitives_.at(kernel);
} else {
return nullptr;
}
}
/// \brief Get the begin iterator of the DelegateModel kernels vector.
///
/// \return The begin iterator of the DelegateModel kernels vector.
KernelIter BeginKernelIterator();
KernelIter BeginKernelIterator() { return kernels_->begin(); }
/// \brief Get the end iterator of the DelegateModel kernels vector.
///
/// \return The end iterator of the DelegateModel kernels vector.
KernelIter EndKernelIterator();
KernelIter EndKernelIterator() { return kernels_->end(); }
/// \brief Replace a contiguous sequence of kernels supported by the delegate with a delegate graph kernel.
///
@@ -66,7 +74,15 @@ class MS_API DelegateModel {
/// \param[in] end Define the end iterator of the contiguous kernels supported by the delegate.
///
/// \return The next iterator after graph_kernel, pointing to the next kernel that has not been visited.
KernelIter Replace(KernelIter from, KernelIter end, kernel::Kernel *graph_kernel);
KernelIter Replace(KernelIter from, KernelIter end, kernel::Kernel *graph_kernel) {
size_t insert_index = from - BeginKernelIterator();
if (insert_index >= kernels_->size()) {
return BeginKernelIterator();
}
kernels_->erase(from, end);
kernels_->insert(BeginKernelIterator() + insert_index, graph_kernel);
return BeginKernelIterator() + insert_index + 1;
}
/// \brief Get the input tensors of DelegateModel.
///
@@ -87,7 +103,7 @@ class MS_API DelegateModel {
std::vector<kernel::Kernel *> *kernels_;
const std::vector<mindspore::MSTensor> &inputs_;
const std::vector<mindspore::MSTensor> &outputs_;
const std::map<kernel::Kernel *, const schema::Primitive *> &primitives_;
const std::map<kernel::Kernel *, const T *> &primitives_;
SchemaVersion version_;
};
@@ -111,7 +127,7 @@ class MS_API Delegate {
/// \note Build will be called in Model::Build.
///
/// \param[in] model Define the delegate model to be built.
virtual Status Build(DelegateModel *model) = 0;
virtual Status Build(DelegateModel<schema::Primitive> *model) = 0;
};
} // namespace mindspore
#endif // MINDSPORE_INCLUDE_API_DELEGATE_H
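
Since DelegateModel is now a class template, its member functions must be visible at instantiation time, so they move inline into this header; the old out-of-line definitions in src/delegate/delegate.cc are deleted further down, and that file is dropped from LITE_SRC and from the NPU source list. For orientation, here is a minimal sketch of a delegate written against the templated interface. MyDelegate and MyGraphKernel are hypothetical names, the four-argument kernel::Kernel constructor is assumed from include/api/kernel.h, and the fusion policy (wrap each recognized kernel one at a time) is purely illustrative:

#include "include/api/delegate.h"

namespace mindspore {
// Hypothetical fused kernel; a real one would implement the delegate's
// inference logic instead of just returning success.
class MyGraphKernel : public kernel::Kernel {
 public:
  MyGraphKernel(const std::vector<MSTensor> &inputs, const std::vector<MSTensor> &outputs)
      : kernel::Kernel(inputs, outputs, nullptr, nullptr) {}
  int Prepare() override { return 0; }
  int Execute() override { return 0; }
  int ReSize() override { return 0; }
};

class MyDelegate : public Delegate {
 public:
  Status Init() override { return kSuccess; }
  // For Lite the template argument is schema::Primitive, matching the
  // DelegateModel<schema::Primitive> built in Scheduler::ReplaceDelegateKernels.
  Status Build(DelegateModel<schema::Primitive> *model) override {
    for (KernelIter it = model->BeginKernelIterator(); it != model->EndKernelIterator();) {
      if (model->GetPrimitive(*it) == nullptr) {
        ++it;  // kernel unknown to this delegate, leave it to the default runtime
        continue;
      }
      auto *graph_kernel = new (std::nothrow) MyGraphKernel(model->inputs(), model->outputs());
      if (graph_kernel == nullptr) {
        return kLiteNullptr;
      }
      // Replace erases [it, it + 1), inserts graph_kernel at that slot, and
      // returns the iterator one past the inserted kernel.
      it = model->Replace(it, it + 1, graph_kernel);
    }
    return kSuccess;
  }
};
}  // namespace mindspore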

View File

@@ -35,8 +35,8 @@ MS_API MSContextHandle MSContextCreate();
/// \brief Destroy the context object.
///
/// \param[in] context Context object handle.
MS_API void MSContextDestroy(MSContextHandle context);
/// \param[in] context Context object handle address.
MS_API void MSContextDestroy(MSContextHandle *context);
/// \brief Set the number of threads at runtime.
///
@@ -110,8 +110,8 @@ MS_API MSDeviceInfoHandle MSDeviceInfoCreate(MSDeviceType device_type);
/// \brief Destroy the device info object.
///
/// \param[in] device_info Device info object handle.
MS_API void MSDeviceInfoDestroy(MSDeviceInfoHandle device_info);
/// \param[in] device_info Device info object handle address.
MS_API void MSDeviceInfoDestroy(MSDeviceInfoHandle *device_info);
/// \brief Set provider's name.
///
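
Note the pattern in this commit's C API changes, implemented in context_c.cc further down: each destroy function now receives the address of the handle, so it can free the object and reset the caller's handle to null, turning a stray second destroy into a no-op. A minimal usage sketch, assuming (as the updated context C test does) that MSContextAddDeviceInfo hands ownership of the device info to the context:

#include "include/c_api/context_c.h"

int main(void) {
  MSContextHandle context = MSContextCreate();
  MSDeviceInfoHandle cpu_device_info = MSDeviceInfoCreate(kMSDeviceTypeCPU);
  MSDeviceInfoSetEnableFP16(cpu_device_info, true);
  MSContextAddDeviceInfo(context, cpu_device_info);  // context now owns cpu_device_info
  MSContextDestroy(&context);  // frees the context and sets the handle to null
  MSContextDestroy(&context);  // *context is already null, so nothing happens
  return 0;
}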

View File

@@ -52,8 +52,15 @@ MS_API MSModelHandle MSModelCreate();
/// \brief Destroy the model object. Only valid for Lite.
///
/// \param[in] model Model object handle.
MS_API void MSModelDestroy(MSModelHandle model);
/// \param[in] model Model object handle address.
MS_API void MSModelDestroy(MSModelHandle *model);
/// \brief Set workspace for the model object. Only valid for IoT.
///
/// \param[in] model Model object handle.
/// \param[in] workspace Define the workspace address.
/// \param[in] workspace_size Define the workspace size.
MS_API void MSModelSetWorkspace(MSModelHandle model, void *workspace, size_t workspace_size);
/// \brief Build the model from model file buffer so that it can run on a device. Only valid for Lite.
///
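
Together with the fail-fast checks added in model_c.cc further down (null parameters return kMSStatusLiteNullptr and kMSModelTypeInvalid returns kMSStatusLiteParamInvalid before any build work starts), a Lite-side build now looks roughly like this sketch; kMSModelTypeMindIR is the assumed enum name for a MindIR buffer, and the context handle is passed in to sidestep ownership questions:

#include "include/c_api/model_c.h"

// Minimal sketch: build a model from an in-memory buffer, then tear it down.
MSStatus BuildAndRelease(const void *model_data, size_t data_size, MSContextHandle context) {
  MSModelHandle model = MSModelCreate();
  MSStatus ret = MSModelBuild(model, model_data, data_size, kMSModelTypeMindIR, context);
  // Destroy now takes the handle's address and resets it to null, whether or
  // not the build succeeded.
  MSModelDestroy(&model);
  return ret;
}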

View File

@@ -42,8 +42,8 @@ MS_API MSTensorHandle MSTensorCreate(const char *name, MSDataType type, const in
/// \brief Destroy the tensor object.
///
/// \param[in] tensor Tensor object handle.
MS_API void MSTensorDestroy(MSTensorHandle tensor);
/// \param[in] tensor Tensor object handle address.
MS_API void MSTensorDestroy(MSTensorHandle *tensor);
/// \brief Obtain a deep copy of the tensor.
///
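
The tensor C test further down exercises the same destroy-by-address change; a condensed sketch, assuming the full MSTensorCreate signature (name, type, shape, shape_num, data, data_len), the MSTensorClone deep-copy function documented above, and the kMSDataTypeNumberTypeFloat32 enum:

#include "include/c_api/tensor_c.h"

void TensorLifecycle(void) {
  float data[6] = {1, 2, 3, 4, 5, 6};
  int64_t shape[2] = {2, 3};
  MSTensorHandle tensor = MSTensorCreate("in0", kMSDataTypeNumberTypeFloat32, shape, 2, data, sizeof(data));
  MSTensorHandle clone = MSTensorClone(tensor);  // deep copy with its own data buffer
  MSTensorDestroy(&tensor);  // frees the tensor and nulls the handle
  MSTensorDestroy(&clone);
}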

View File

@@ -0,0 +1,26 @@
@rem Copyright 2021 Huawei Technologies Co., Ltd
@rem
@rem Licensed under the Apache License, Version 2.0 (the "License");
@rem you may not use this file except in compliance with the License.
@rem You may obtain a copy of the License at
@rem
@rem http://www.apache.org/licenses/LICENSE-2.0
@rem
@rem Unless required by applicable law or agreed to in writing, software
@rem distributed under the License is distributed on an "AS IS" BASIS,
@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@rem See the License for the specific language governing permissions and
@rem limitations under the License.
@rem ============================================================================
@echo off
@title mindspore_lite_quick_start_c_demo_build
SET BASEPATH=%CD%
IF NOT EXIST "%BASEPATH%/build" (
md build
)
cd %BASEPATH%/build
cmake -G "CodeBlocks - MinGW Makefiles" %BASEPATH%
cmake --build .

View File

@@ -197,11 +197,6 @@ set(LITE_SRC
${KERNEL_REG_SRC}
)
set(LITE_SRC
${LITE_SRC}
${CMAKE_CURRENT_SOURCE_DIR}/delegate/delegate.cc
)
if(MSLITE_GPU_BACKEND STREQUAL opencl)
file(GLOB_RECURSE OPENCL_RUNTIME_SRC
${CMAKE_CURRENT_SOURCE_DIR}/runtime/gpu/opencl/*.cc

View File

@@ -31,10 +31,11 @@ MSContextHandle MSContextCreate() {
return static_cast<MSContextHandle>(impl);
}
void MSContextDestroy(MSContextHandle context) {
if (context != nullptr) {
auto impl = static_cast<mindspore::Context::Data *>(context);
void MSContextDestroy(MSContextHandle *context) {
if (*context != nullptr) {
auto impl = static_cast<mindspore::Context::Data *>(*context);
delete impl;
*context = nullptr;
}
}
@@ -144,10 +145,11 @@ MSDeviceInfoHandle MSDeviceInfoCreate(MSDeviceType device_type) {
return static_cast<MSDeviceInfoHandle>(impl);
}
void MSDeviceInfoDestroy(MSDeviceInfoHandle device_info) {
if (device_info != nullptr) {
auto impl = static_cast<mindspore::DeviceInfoContext *>(device_info);
void MSDeviceInfoDestroy(MSDeviceInfoHandle *device_info) {
if (*device_info != nullptr) {
auto impl = static_cast<mindspore::DeviceInfoContext *>(*device_info);
delete impl;
*device_info = nullptr;
}
}

View File

@@ -309,19 +309,29 @@ MSModelHandle MSModelCreate() {
return static_cast<MSModelHandle>(impl);
}
void MSModelDestroy(MSModelHandle model) {
if (model != nullptr) {
auto impl = static_cast<mindspore::ModelC *>(model);
void MSModelDestroy(MSModelHandle *model) {
if (*model != nullptr) {
auto impl = static_cast<mindspore::ModelC *>(*model);
delete impl;
*model = nullptr;
}
}
void MSModelSetWorkspace(MSModelHandle model, void *workspace, size_t workspace_size) {
MS_LOG(ERROR) << "Unsupported Feature.";
return;
}
MSStatus MSModelBuild(MSModelHandle model, const void *model_data, size_t data_size, MSModelType model_type,
const MSContextHandle model_context) {
if (model == nullptr || model_data == nullptr || model_context == nullptr) {
MS_LOG(ERROR) << "param is nullptr.";
return kMSStatusLiteNullptr;
}
if (model_type == kMSModelTypeInvalid) {
MS_LOG(ERROR) << "param is invalid.";
return kMSStatusLiteParamInvalid;
}
mindspore::Context::Data *context = static_cast<mindspore::Context::Data *>(model_context);
auto impl = static_cast<mindspore::ModelC *>(model);
auto ret = impl->Build(model_data, data_size, static_cast<mindspore::ModelType>(model_type), context);
@@ -334,6 +344,10 @@ MSStatus MSModelBuildFromFile(MSModelHandle model, const char *model_path, MSMod
MS_LOG(ERROR) << "param is nullptr.";
return kMSStatusLiteNullptr;
}
if (model_type == kMSModelTypeInvalid) {
MS_LOG(ERROR) << "param is invalid.";
return kMSStatusLiteParamInvalid;
}
mindspore::Context::Data *context = static_cast<mindspore::Context::Data *>(model_context);
auto impl = static_cast<mindspore::ModelC *>(model);
auto ret = impl->Build(model_path, static_cast<mindspore::ModelType>(model_type), context);

View File

@@ -40,10 +40,11 @@ MSTensorHandle MSTensorCreate(const char *name, MSDataType type, const int64_t *
return impl;
}
void MSTensorDestroy(MSTensorHandle tensor) {
auto impl = static_cast<mindspore::MSTensor::Impl *>(tensor);
void MSTensorDestroy(MSTensorHandle *tensor) {
auto impl = static_cast<mindspore::MSTensor::Impl *>(*tensor);
if (impl != nullptr) {
delete impl;
*tensor = nullptr;
}
}

View File

@@ -1,40 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "include/api/delegate.h"
namespace mindspore {
const schema::Primitive *DelegateModel::GetPrimitive(kernel::Kernel *kernel) const {
if (primitives_.find(kernel) != primitives_.end()) {
return primitives_.at(kernel);
} else {
return nullptr;
}
}
KernelIter DelegateModel::BeginKernelIterator() { return kernels_->begin(); }
KernelIter DelegateModel::EndKernelIterator() { return kernels_->end(); }
KernelIter DelegateModel::Replace(KernelIter from, KernelIter end, kernel::Kernel *graph_kernel) {
size_t insert_index = from - BeginKernelIterator();
if (insert_index >= kernels_->size()) {
return BeginKernelIterator();
}
kernels_->erase(from, end);
kernels_->insert(BeginKernelIterator() + insert_index, graph_kernel);
return BeginKernelIterator() + insert_index + 1;
}
} // namespace mindspore

View File

@@ -87,8 +87,8 @@ std::vector<mindspore::MSTensor> GetGraphOutTensors(const std::vector<T *> &ops)
}
template <typename T>
std::vector<mindspore::MSTensor> GraphInTensors(const std::vector<T *> &ops, DelegateModel *model, KernelIter from,
KernelIter end) {
std::vector<mindspore::MSTensor> GraphInTensors(const std::vector<T *> &ops, DelegateModel<schema::Primitive> *model,
KernelIter from, KernelIter end) {
auto in_tensors = GetGraphInTensors(ops);
std::vector<mindspore::MSTensor> all_in_tensors;
for (auto op : ops) {
@@ -114,8 +114,8 @@ std::vector<mindspore::MSTensor> GraphInTensors(const std::vector<T *> &ops, Del
}
template <typename T>
std::vector<mindspore::MSTensor> GraphOutTensors(const std::vector<T *> &ops, DelegateModel *model, KernelIter from,
KernelIter end) {
std::vector<mindspore::MSTensor> GraphOutTensors(const std::vector<T *> &ops, DelegateModel<schema::Primitive> *model,
KernelIter from, KernelIter end) {
auto out_tensors = GetGraphOutTensors(ops);
std::vector<mindspore::MSTensor> all_out_tensors;
for (auto op : ops) {

View File

@@ -169,7 +169,7 @@ Status NPUDelegate::Init() {
return mindspore::kSuccess;
}
Status NPUDelegate::Build(DelegateModel *model) {
Status NPUDelegate::Build(DelegateModel<schema::Primitive> *model) {
KernelIter from, end;
std::vector<NPUOp *> npu_ops;
int graph_index = 0;
@@ -269,7 +269,8 @@ NPUOp *NPUDelegate::GetOP(kernel::Kernel *kernel, const schema::Primitive *primi
return npu_op;
}
std::vector<mindspore::MSTensor> GraphOutTensors(const std::vector<NPUOp *> &ops, DelegateModel *model, KernelIter from,
std::vector<mindspore::MSTensor> GraphOutTensors(const std::vector<NPUOp *> &ops,
DelegateModel<schema::Primitive> *model, KernelIter from,
KernelIter end) {
auto out_tensors = lite::GetGraphOutTensors(ops);
std::vector<mindspore::MSTensor> all_out_tensors;
@@ -296,8 +297,8 @@ std::vector<mindspore::MSTensor> GraphOutTensors(const std::vector<NPUOp *> &ops
return out_tensors;
}
kernel::Kernel *NPUDelegate::CreateNPUGraph(const std::vector<NPUOp *> &ops, DelegateModel *model, KernelIter from,
KernelIter end) {
kernel::Kernel *NPUDelegate::CreateNPUGraph(const std::vector<NPUOp *> &ops, DelegateModel<schema::Primitive> *model,
KernelIter from, KernelIter end) {
auto in_tensors = lite::GetGraphInTensors(ops);
auto out_tensors = GraphOutTensors(ops, model, from, end);
auto graph_kernel = new (std::nothrow) NPUGraph(ops, npu_manager_, in_tensors, out_tensors);

View File

@@ -34,13 +34,13 @@ class NPUDelegate : public Delegate {
Status Init() override;
Status Build(DelegateModel *model) override;
Status Build(DelegateModel<schema::Primitive> *model) override;
protected:
NPUOp *GetOP(kernel::Kernel *kernel, const schema::Primitive *primitive);
kernel::Kernel *CreateNPUGraph(const std::vector<NPUOp *> &ops, DelegateModel *model, KernelIter from,
KernelIter end);
kernel::Kernel *CreateNPUGraph(const std::vector<NPUOp *> &ops, DelegateModel<schema::Primitive> *model,
KernelIter from, KernelIter end);
NPUManager *npu_manager_ = nullptr;
NPUPassManager *pass_manager_ = nullptr;

View File

@@ -135,7 +135,7 @@ Status TensorRTDelegate::Init() {
return mindspore::kSuccess;
}
Status TensorRTDelegate::Build(DelegateModel *model) {
Status TensorRTDelegate::Build(DelegateModel<schema::Primitive> *model) {
int ret = lite::SetCudaDevice(device_info_);
if (ret != RET_OK) {
return mindspore::kLiteError;
@@ -209,8 +209,9 @@ TensorRTOp *TensorRTDelegate::FindTensorRTOp(kernel::Kernel *kernel, const schem
}
}
TensorRTSubGraph *TensorRTDelegate::CreateTensorRTGraph(const std::vector<TensorRTOp *> &ops, DelegateModel *model,
KernelIter from, KernelIter end) {
TensorRTSubGraph *TensorRTDelegate::CreateTensorRTGraph(const std::vector<TensorRTOp *> &ops,
DelegateModel<schema::Primitive> *model, KernelIter from,
KernelIter end) {
auto in_tensors = GraphInTensors<TensorRTOp>(ops, model, from, end);
auto out_tensors = GraphOutTensors<TensorRTOp>(ops, model, from, end);
auto *tensorrt_graph = new (std::nothrow) TensorRTSubGraph(ops, in_tensors, out_tensors, context_, device_info_,

View File

@@ -40,13 +40,13 @@ class TensorRTDelegate : public Delegate {
Status Init() override;
Status Build(DelegateModel *model) override;
Status Build(DelegateModel<schema::Primitive> *model) override;
private:
TensorRTOp *FindTensorRTOp(kernel::Kernel *kernel, const schema::Primitive *primitive);
TensorRTSubGraph *CreateTensorRTGraph(const std::vector<TensorRTOp *> &ops, DelegateModel *model, KernelIter from,
KernelIter end);
TensorRTSubGraph *CreateTensorRTGraph(const std::vector<TensorRTOp *> &ops, DelegateModel<schema::Primitive> *model,
KernelIter from, KernelIter end);
std::unordered_map<schema::PrimitiveType, TensorRTGetOp> op_func_lists_;

View File

@@ -402,8 +402,8 @@ int Scheduler::ReplaceDelegateKernels(std::vector<kernel::LiteKernel *> *dst_ker
ms_inputs_ = LiteTensorsToMSTensors(inputs_);
ms_outputs_ = LiteTensorsToMSTensors(outputs_);
auto schema_version = static_cast<SchemaVersion>(schema_version_);
DelegateModel *model =
new (std::nothrow) DelegateModel(&kernels, ms_inputs_, ms_outputs_, primitives_, schema_version);
DelegateModel<schema::Primitive> *model =
new (std::nothrow) DelegateModel<schema::Primitive>(&kernels, ms_inputs_, ms_outputs_, primitives_, schema_version);
if (model == nullptr) {
MS_LOG(ERROR) << "New delegate model failed.";
return RET_NULL_PTR;

View File

@@ -62,10 +62,10 @@ class CustomDelegate : public Delegate {
Status Init() override { return mindspore::kSuccess; }
Status Build(DelegateModel *model) override;
Status Build(DelegateModel<schema::Primitive> *model) override;
};
Status CustomDelegate::Build(DelegateModel *model) {
Status CustomDelegate::Build(DelegateModel<schema::Primitive> *model) {
auto graph_kernel = new (std::nothrow) CustomSubgraph(model->inputs(), model->outputs());
if (graph_kernel == nullptr) {
return mindspore::kLiteNullptr;

View File

@@ -60,7 +60,7 @@ TEST_F(ContextCTest, common_test) {
ASSERT_EQ(MSContextGetEnableParallel(context), true);
MSDeviceInfoHandle cpu_device_info = MSDeviceInfoCreate(kMSDeviceTypeCPU);
MSDeviceInfoDestroy(cpu_device_info);
MSDeviceInfoDestroy(&cpu_device_info);
cpu_device_info = MSDeviceInfoCreate(kMSDeviceTypeCPU);
MSDeviceInfoSetEnableFP16(cpu_device_info, true);
@@ -68,6 +68,6 @@ TEST_F(ContextCTest, common_test) {
MSContextAddDeviceInfo(context, cpu_device_info);
MSContextAddDeviceInfo(context, npu_device_info);
MSContextDestroy(context);
MSContextDestroy(&context);
}
} // namespace mindspore

View File

@@ -83,7 +83,7 @@ TEST_F(TensorCTest, common_test) {
ASSERT_EQ(MSTensorGetDataSize(clone), MSTensorGetDataSize(tensor));
ASSERT_TRUE(MSTensorGetData(clone) != MSTensorGetData(tensor));
MSTensorDestroy(tensor);
MSTensorDestroy(clone);
MSTensorDestroy(&tensor);
MSTensorDestroy(&clone);
}
} // namespace mindspore

View File

@@ -302,7 +302,6 @@ chmod 444 ${GPU_MAPPING_OUTPUT_FILE}
# support for npu
npu_files=()
while IFS='' read -r line; do npu_files+=("$line"); done < <(ls mindspore/lite/src/delegate/delegate.cc)
while IFS='' read -r line; do npu_files+=("$line"); done < <(ls mindspore/lite/src/delegate/npu/*.cc)
while IFS='' read -r line; do npu_files+=("$line"); done < <(ls mindspore/lite/src/delegate/npu/op/*.cc)
while IFS='' read -r line; do npu_files+=("$line"); done < <(ls mindspore/lite/src/delegate/npu/pass/*.cc)