mindspore lite: new unified infer framework

This commit is contained in:
liu lili 2023-02-25 09:46:23 +08:00
parent 0898378bb9
commit 17629fc26d
32 changed files with 2071 additions and 79 deletions

View File

@ -38,6 +38,7 @@ enum DeviceType {
kAscend910,
kAscend310,
kCustomDevice,
kAllDevice,
// add new type here
kInvalidDeviceType = 100,
};
@ -223,6 +224,16 @@ void DeviceInfoContext::SetProvider(const std::string &provider) { SetProvider(S
std::string DeviceInfoContext::GetProviderDevice() const { return CharToString(GetProviderDeviceChar()); }
void DeviceInfoContext::SetProviderDevice(const std::string &device) { SetProviderDevice(StringToChar(device)); }
/// \brief Derived from DeviceInfoContext. The configuration of the model running automatically on the available
/// host devices, including CPU/GPU/NPU/Ascend310/Ascend910. This option is only valid for MindSpore Lite.
class MS_API AutoDeviceInfo : public DeviceInfoContext {
public:
/// \brief Get the type of this DeviceInfoContext.
///
/// \return Type of this DeviceInfoContext.
enum DeviceType GetDeviceType() const override { return DeviceType::kAllDevice; };
};
/// \brief Derived from DeviceInfoContext, The configuration of the model running on the CPU. This option is only valid
/// for MindSpore Lite.
class MS_API CPUDeviceInfo : public DeviceInfoContext {
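A minimal usage sketch for the new kAllDevice / AutoDeviceInfo option (assuming the existing MindSpore Lite C++ Context/Model API; the model path is illustrative):

auto context = std::make_shared<mindspore::Context>();
// kAllDevice: let the new scheduler place each graph/sub-graph on a suitable device
context->MutableDeviceInfo().push_back(std::make_shared<mindspore::AutoDeviceInfo>());
mindspore::Model model;
// Build with the auto-device context; the unified infer framework selects the backend per sub-graph
auto ret = model.Build("model.mindir", mindspore::kMindIR, context);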

View File

@ -34,6 +34,9 @@ if(MSLITE_ENABLE_CLOUD_FUSION_INFERENCE OR MSLITE_ENABLE_CLOUD_INFERENCE)
${CMAKE_CURRENT_SOURCE_DIR}/kernel/cpu/transpose_kernel_mod.cc
${CMAKE_CURRENT_SOURCE_DIR}/infer_session.cc
${CMAKE_CURRENT_SOURCE_DIR}/session/single_op_session.cc
${CMAKE_CURRENT_SOURCE_DIR}/session/delegate_session.cc
# ${CMAKE_CURRENT_SOURCE_DIR}/session/default_session.cc
${CMAKE_CURRENT_SOURCE_DIR}/session/factory.cc
${CMAKE_CURRENT_SOURCE_DIR}/infer_device_address.cc
${CMAKE_CURRENT_SOURCE_DIR}/utils/kernel_build_utils.cc
${CMAKE_CURRENT_SOURCE_DIR}/utils/kernel_graph_utils.cc
@ -41,8 +44,6 @@ if(MSLITE_ENABLE_CLOUD_FUSION_INFERENCE OR MSLITE_ENABLE_CLOUD_INFERENCE)
${CMAKE_CURRENT_SOURCE_DIR}/utils/runtime_utils.cc
${CMAKE_CURRENT_SOURCE_DIR}/utils/serialization.cc
${CMAKE_CURRENT_SOURCE_DIR}/utils/func_graph_utils.cc
${CMAKE_CURRENT_SOURCE_DIR}/session/delegate_session.cc
${CMAKE_CURRENT_SOURCE_DIR}/session/factory.cc
${CMAKE_CURRENT_SOURCE_DIR}/delegate/factory.cc
${CMAKE_CURRENT_SOURCE_DIR}/delegate/plugin/tensorrt_executor_plugin.cc
${CMAKE_CURRENT_SOURCE_DIR}/delegate/plugin/litert_executor_plugin.cc
@ -51,6 +52,8 @@ if(MSLITE_ENABLE_CLOUD_FUSION_INFERENCE OR MSLITE_ENABLE_CLOUD_INFERENCE)
${CMAKE_CURRENT_SOURCE_DIR}/delegate_graph_executor.cc
${CMAKE_CURRENT_SOURCE_DIR}/session/optimizer/tensorrt_optimizer.cc
${CMAKE_CURRENT_SOURCE_DIR}/delegate/graph_executor/litert/func_graph_reuse_manager.cc
# ${CMAKE_CURRENT_SOURCE_DIR}/graph_compiler/factory.cc
# ${CMAKE_CURRENT_SOURCE_DIR}/graph_runtime/factory.cc
)
if(MSLITE_ENABLE_BFC_MEMORY)
set(MSLITE_EXTEND_RUNTIME_SRC ${MSLITE_EXTEND_RUNTIME_SRC}

View File

@ -0,0 +1,80 @@
/**
* Copyright 2023 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_EXTENDRT_EXECUTION_FLOW_H_
#define MINDSPORE_LITE_SRC_EXTENDRT_EXECUTION_FLOW_H_
#include <vector>
#include "infer/execution_flow.h"
namespace mindspore::infer {
class ExecutionFlow : public abstract::ExecutionFlow {
public:
ExecutionFlow() = default;
virtual ~ExecutionFlow() {
for (auto tensor : inputs_) {
if (tensor != nullptr) {
delete tensor;
}
}
for (auto tensor : outputs_) {
if (tensor != nullptr) {
delete tensor;
}
}
for (auto kernel : kernels_) {
if (kernel != nullptr) {
delete kernel;
}
}
}
std::vector<abstract::Kernel *> GetKernels() override { return kernels_; }
void SetKernels(const std::vector<abstract::Kernel *> &kernels) override { kernels_ = kernels; }
std::vector<abstract::Tensor *> GetInputs() override { return inputs_; }
void SetInputs(const std::vector<abstract::Tensor *> &inputs) override { inputs_ = inputs; }
std::vector<abstract::Tensor *> GetOutputs() override { return outputs_; }
void SetOutputs(const std::vector<abstract::Tensor *> &outputs) override { outputs_ = outputs; }
abstract::Context *GetContext() override { return context_; }
void SetContext(abstract::Context *context) override { context_ = context; }
const abstract::KernelCallBack &GetKernelBeforeCallBack() override { return before_; }
void SetKernelBeforeCallBack(const abstract::KernelCallBack &callback) override { before_ = callback; }
const abstract::KernelCallBack &GetKernelAfterCallBack() override { return after_; }
void SetKernelAfterCallBack(const abstract::KernelCallBack &callback) override { after_ = callback; }
private:
std::vector<abstract::Kernel *> kernels_;
std::vector<abstract::Tensor *> inputs_;
std::vector<abstract::Tensor *> outputs_;
abstract::Context *context_ = nullptr;
abstract::KernelCallBack before_;
abstract::KernelCallBack after_;
};
} // namespace mindspore::infer
#endif // MINDSPORE_LITE_SRC_EXTENDRT_EXECUTION_FLOW_H_

View File

@ -0,0 +1,96 @@
/**
* Copyright 2023 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_EXTENDRT_EXECUTION_PLAN_H_
#define MINDSPORE_LITE_SRC_EXTENDRT_EXECUTION_PLAN_H_
#include <memory>
#include <vector>
#include <unordered_map>
#include "infer/execution_plan.h"
namespace mindspore::infer {
class ExecutionPlan : public abstract::ExecutionPlan {
public:
ExecutionPlan() = default;
virtual ~ExecutionPlan() {
if (input_isolate_map_ != nullptr) {
delete input_isolate_map_;
input_isolate_map_ = nullptr;
}
if (output_isolate_map_) {
delete output_isolate_map_;
output_isolate_map_ = nullptr;
}
for (auto tensor : inputs_) {
if (tensor != nullptr) {
delete tensor;
}
}
for (auto tensor : outputs_) {
if (tensor != nullptr) {
delete tensor;
}
}
}
std::vector<std::shared_ptr<abstract::ExecutionFlow>> GetExecutionFlows() override { return execution_flows_; }
void SetExecutionFlows(std::vector<std::shared_ptr<abstract::ExecutionFlow>> execution_flows) override {
execution_flows_ = execution_flows;
}
void AddExecutionFlow(std::shared_ptr<abstract::ExecutionFlow> execution_flow) override {
execution_flows_.emplace_back(execution_flow);
}
FuncGraphPtr GetFuncGraph() override { return func_graph_; }
void SetFuncGraph(FuncGraphPtr func_graph) override { func_graph_ = func_graph; }
std::vector<abstract::Tensor *> GetInputs() override { return inputs_; }
void SetInputs(const std::vector<abstract::Tensor *> &inputs) override { inputs_ = inputs; }
std::vector<abstract::Tensor *> GetOutputs() override { return outputs_; }
void SetOutputs(const std::vector<abstract::Tensor *> &outputs) override { outputs_ = outputs; }
void SetInputMap(std::unordered_map<Tensor *, Tensor *> *input_isolate_map) {
input_isolate_map_ = input_isolate_map;
}
std::unordered_map<Tensor *, Tensor *> *GetInputMap() { return input_isolate_map_; }
void SetOutputMap(std::unordered_map<Tensor *, Tensor *> *output_isolate_map) {
output_isolate_map_ = output_isolate_map;
}
std::unordered_map<Tensor *, Tensor *> *GetOutputMap() { return output_isolate_map_; }
private:
std::vector<std::shared_ptr<abstract::ExecutionFlow>> execution_flows_;
FuncGraphPtr func_graph_;
std::vector<abstract::Tensor *> inputs_;
std::vector<abstract::Tensor *> outputs_;
std::unordered_map<Tensor *, Tensor *> *input_isolate_map_ = nullptr;
std::unordered_map<Tensor *, Tensor *> *output_isolate_map_ = nullptr;
};
} // namespace mindspore::infer
#endif // MINDSPORE_LITE_SRC_EXTENDRT_EXECUTION_PLAN_H_

View File

@ -0,0 +1,56 @@
/**
* Copyright 2023 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "extendrt/flow_executor.h"
#include "extendrt/execution_plan.h"
#include "litert/mindrt_executor.h"
namespace mindspore::infer {
FlowExecutor::FlowExecutor() : FlowExecutor("FlowExecutor") {}
FlowExecutor::FlowExecutor(const std::string &name, std::shared_ptr<abstract::ExecutionPlan> execution_plan) {
name_ = name;
execution_plan_ = execution_plan;
auto infer_execution_plan = std::dynamic_pointer_cast<infer::ExecutionPlan>(execution_plan_);
if (infer_execution_plan == nullptr) {
MS_LOG(ERROR) << "FlowExecutor::FlowExecutor Not Supported execution plan is passed";
} else {
executor_ = std::make_shared<mindspore::lite::MindrtExecutor>(infer_execution_plan->GetInputMap(),
infer_execution_plan->GetOutputMap());
}
}
Status FlowExecutor::Prepare(std::shared_ptr<abstract::ExecutionFlow> execution_flow) {
if (executor_ == nullptr) {
MS_LOG(ERROR) << "FlowExecutor::Prepare executor is nullptr";
return kLiteError;
}
if (execution_flow == nullptr) {
MS_LOG(ERROR) << "FlowExecutor::Prepare execution flow is nullptr";
return kLiteError;
}
return executor_->Prepare(execution_flow->GetKernels(), execution_flow->GetInputs(), execution_flow->GetOutputs(),
execution_flow->GetContext());
}
Status FlowExecutor::Execute() { return kSuccess; }
int FlowExecutor::Resize(const std::vector<abstract::Tensor *> &inputs, const std::vector<std::vector<int>> &dims) {
return kSuccess;
}
} // namespace mindspore::infer

View File

@ -0,0 +1,52 @@
/**
* Copyright 2023 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_EXTENDRT_FLOW_EXECUTOR_H_
#define MINDSPORE_LITE_SRC_EXTENDRT_FLOW_EXECUTOR_H_
#include <memory>
#include <vector>
#include <string>
#include "infer/executor.h"
#include "infer/execution_plan.h"
#include "litert/executor.h"
namespace mindspore::infer {
class FlowExecutor : public mindspore::infer::abstract::Executor {
public:
FlowExecutor();
// explicit FlowExecutor(const std::string &name);
explicit FlowExecutor(const std::string &name, std::shared_ptr<abstract::ExecutionPlan> execution_plan = nullptr);
virtual ~FlowExecutor() = default;
const std::string &Name() override { return name_; }
Status Prepare(std::shared_ptr<abstract::ExecutionFlow> execution_flow) override;
Status Execute() override;
int Resize(const std::vector<abstract::Tensor *> &inputs, const std::vector<std::vector<int>> &dims) override;
private:
std::string name_;
std::shared_ptr<abstract::ExecutionFlow> execution_flow_;
std::shared_ptr<mindspore::lite::Executor> executor_;
std::shared_ptr<abstract::ExecutionPlan> execution_plan_;
};
} // namespace mindspore::infer
#endif // MINDSPORE_LITE_SRC_EXTENDRT_FLOW_EXECUTOR_H_

View File

@ -0,0 +1,234 @@
/**
* Copyright 2019-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <unordered_map>
#include <algorithm>
#include "extendrt/graph_compiler/default_graph_compiler.h"
#include "backend/graph_compiler/graph_partition.h"
#include "backend/graph_compiler/segment_runner.h"
#include "common/log.h"
namespace mindspore {
static const std::vector<PrimitivePtr> ms_infer_cut_list = {prim::kPrimReturn, prim::kPrimPartial,
prim::kPrimSwitch, prim::kPrimMakeTuple,
prim::kPrimBpropCut, prim::kPrimSwitchLayer};
static constexpr auto ms_infer_backend_name = "mindspore_lite_backend";
std::shared_ptr<abstract::ExecutionPlan> DefaultGraphCompiler::Compile(FuncGraphPtr graph) {
MS_LOG(INFO) << "DefaultGraphCompiler::Compile";
MS_LOG(DEBUG) << "DefaultGraphCompiler::Partition Partition FunctionGraph Begin";
auto graph_segments = Partition(graph);
if (graph_segments.empty()) {
MS_LOG(ERROR) << "DefaultGraphCompiler::Partition partition graph failed";
return nullptr;
}
MS_LOG(DEBUG) << "DefaultGraphCompiler::Partition Partition FunctionGraph End";
MS_LOG(DEBUG) << "DefaultGraphCompiler::Compile Schedule Graph Execute Plan Begin";
auto execution_plan = Schedule(graph_segments, graph);
if (execution_plan == nullptr) {
MS_LOG(ERROR) << "DefaultGraphCompiler::Partition partition graph failed";
return nullptr;
}
MS_LOG(DEBUG) << "DefaultGraphCompiler::Compile Schedule Graph Execute Plan End";
return execution_plan;
}
std::vector<GraphSegmentPtr> DefaultGraphCompiler::Partition(const FuncGraphPtr &graph) {
auto partition = std::make_shared<compile::GraphPartition>(ms_infer_cut_list, ms_infer_backend_name);
if (partition == nullptr) {
MS_LOG(ERROR) << "DefaultGraphCompiler::Partition create graph partition failed, maybe not enough memory";
return std::vector<GraphSegmentPtr>();
}
// if the context target is cpu, graph should convert to NHWC, call related pass
// multi_target set false
return partition->Partition(graph, false);
}
std::shared_ptr<abstract::ExecutionPlan> DefaultGraphCompiler::Schedule(
const std::vector<GraphSegmentPtr> &graph_segments, FuncGraphPtr func_graph) {
auto execution_plan = std::make_shared<infer::ExecutionPlan>();
anf_tensor_map_.clear();
std::unordered_map<Tensor *, Tensor *> *input_isolate_map = new std::unordered_map<Tensor *, Tensor *>();
std::unordered_map<Tensor *, Tensor *> *output_isolate_map = new std::unordered_map<Tensor *, Tensor *>();
// Convert FuncGraph Input and Output AnfNode to Tensor and save in Execution Plan
auto graph_inputs = func_graph->get_inputs();
if (graph_inputs.empty()) {
MS_LOG(ERROR) << "DefaultGraphCompiler::Schedule get graph inputs node failed";
delete input_isolate_map;
delete output_isolate_map;
return nullptr;
}
auto graph_output = func_graph->output();
if (graph_output == nullptr) {
MS_LOG(ERROR) << "DefaultGraphCompiler::Schedule get graph output node failed";
delete input_isolate_map;
delete output_isolate_map;
return nullptr;
}
auto graph_input_tensors = CreateTensors(graph_inputs);
if (graph_input_tensors.size() != graph_inputs.size()) {
MS_LOG(ERROR) << "DefaultGraphCompiler::Schedule create graph input tensors failed";
delete input_isolate_map;
delete output_isolate_map;
return nullptr;
}
execution_plan->SetInputs(graph_input_tensors);
auto graph_output_tensor = CreateTensor(graph_output);
if (graph_output_tensor == nullptr) {
MS_LOG(ERROR) << "DefaultGraphCompiler::Schedule create graph output tensor failed";
delete input_isolate_map;
delete output_isolate_map;
return nullptr;
}
execution_plan->SetOutputs({graph_output_tensor});
for (auto graph_segment : graph_segments) {
FuncGraphPtr fg = nullptr;
AnfNodePtrList inputs;
AnfNodePtrList outputs;
std::tie(fg, inputs, outputs) = TransformSegmentToAnfGraph(graph_segment->nodes_);
auto execution_flow = this->Schedule(graph_segment, inputs, outputs);
if (execution_flow == nullptr) {
MS_LOG(ERROR) << "DefaultGraphCompiler::Schedule schedule graph segment failed";
delete input_isolate_map;
delete output_isolate_map;
return nullptr;
}
for (size_t i = 0; i < execution_flow->GetInputs().size(); i++) {
auto input_tensor = execution_flow->GetInputs()[i];
auto input_node = inputs[i];
auto it = anf_tensor_map_.find(input_node);
if (it != anf_tensor_map_.end()) {
auto outer_tensor = it->second;
(*input_isolate_map)[input_tensor] = outer_tensor;
} else {
anf_tensor_map_[input_node] = input_tensor;
}
}
for (size_t i = 0; i < execution_flow->GetOutputs().size(); i++) {
auto output_tensor = execution_flow->GetOutputs()[i];
auto output_node = outputs[i];
auto it = anf_tensor_map_.find(output_node);
if (it != anf_tensor_map_.end()) {
auto outer_tensor = it->second;
(*output_isolate_map)[output_tensor] = outer_tensor;
} else {
anf_tensor_map_[output_node] = output_tensor;
}
}
execution_plan->AddExecutionFlow(execution_flow);
}
execution_plan->SetInputMap(input_isolate_map);
execution_plan->SetOutputMap(output_isolate_map);
return execution_plan;
}
infer::abstract::Tensor *DefaultGraphCompiler::CreateTensor(AnfNodePtr node) {
if (node->isa<CNode>()) {
} else if (node->isa<Parameter>()) {
auto parameter_node = node->cast<ParameterPtr>();
if (parameter_node == nullptr) {
MS_LOG(ERROR) << "parameter node is nullptr";
return nullptr;
}
ShapeVector shape_vector;
TypeId data_type = kTypeUnknown;
auto status = GetDTAndShapeFromParameter(parameter_node, &data_type, &shape_vector);
if (status != kSuccess) {
MS_LOG(ERROR) << "get data_type and shape failed";
return nullptr;
}
if (data_type == kObjectTypeString) {
MS_LOG(ERROR) << "Not support String type";
return nullptr;
}
std::vector<int> lite_shape;
std::transform(shape_vector.begin(), shape_vector.end(), std::back_inserter(lite_shape),
[](int64_t dim) { return static_cast<int>(dim); });
auto lite_tensor = new lite::Tensor(data_type, lite_shape);
if (lite_tensor == nullptr) {
MS_LOG(ERROR) << "New tensor failed, may be memory is not enough";
return nullptr;
}
anf_tensor_map_[node] = lite_tensor;
return lite_tensor;
}
return nullptr;
}
Status DefaultGraphCompiler::GetDTAndShapeFromParameter(ParameterPtr parameter, TypeId *data_type,
ShapeVector *shape_vector) {
MS_ASSERT(parameter != nullptr && data_type != nullptr && shape_vector != nullptr);
auto abstract_base = parameter->abstract();
if (abstract_base == nullptr) {
MS_LOG(ERROR) << "abstract base is nullptr";
return kLiteError;
}
auto abstract_tensor = utils::cast<abstract::AbstractTensorPtr>(abstract_base);
if (abstract_tensor == nullptr) {
MS_LOG(ERROR) << "abstract tensor is nullptr";
return kLiteError;
}
return GetDTAndShapeFromAbTensor(abstract_tensor, data_type, shape_vector);
}
Status DefaultGraphCompiler::GetDTAndShapeFromAbTensor(const abstract::AbstractTensorPtr &abstract, TypeId *data_type,
ShapeVector *shape_vector) {
MS_ASSERT(abstract != nullptr && data_type != nullptr && shape_vector != nullptr);
if (abstract->element() == nullptr) {
MS_LOG(ERROR) << "'element' of abstract is nullptr";
return kLiteError;
}
auto type_ptr = abstract->element()->GetTypeTrack();
if (type_ptr == nullptr) {
MS_LOG(ERROR) << "type of abstract is nullptr";
return kLiteError;
}
*data_type = type_ptr->type_id();
if (!utils::isa<abstract::ShapePtr>(abstract->BuildShape())) {
MS_LOG(ERROR) << "Shape of Abstract of Parameter should be ShapePtr";
return kLiteError;
}
*shape_vector = utils::cast<abstract::ShapePtr>(abstract->BuildShape())->shape();
return kSuccess;
}
std::vector<infer::abstract::Tensor *> DefaultGraphCompiler::CreateTensors(const std::vector<AnfNodePtr> &nodes) {
std::vector<infer::abstract::Tensor *> tensors;
std::transform(nodes.begin(), nodes.end(), std::back_inserter(tensors),
[this](const AnfNodePtr &node) { return CreateTensor(node); });
return tensors;
}
std::shared_ptr<abstract::ExecutionFlow> DefaultGraphCompiler::Schedule(const GraphSegmentPtr &graph_segment,
const std::vector<AnfNodePtr> &inputs,
const std::vector<AnfNodePtr> &outputs) {
// implementation by hangangqiang
return nullptr;
}
} // namespace mindspore

View File

@ -0,0 +1,54 @@
/**
* Copyright 2019-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_EXTENDRT_GRAPH_COMPILER_DEFAULT_GRAPH_COMPILER_H_
#define MINDSPORE_LITE_EXTENDRT_GRAPH_COMPILER_DEFAULT_GRAPH_COMPILER_H_
#include <memory>
#include <vector>
#include "infer/graph_compiler.h"
namespace mindspore {
class DefaultGraphCompiler : public mindspore::infer::abstract::GraphCompiler {
public:
DefaultGraphCompiler() {}
virtual ~DefaultGraphCompiler() = default;
std::shared_ptr<abstract::ExecutionPlan> Compile(FuncGraphPtr graph) override;
protected:
virtual std::vector<GraphSegmentPtr> Partition(const FuncGraphPtr &graph);
virtual std::shared_ptr<abstract::ExecutionPlan> Schedule(const std::vector<GraphSegmentPtr> &graph_segments,
FuncGraphPtr func_graph);
virtual std::shared_ptr<abstract::ExecutionFlow> Schedule(const GraphSegmentPtr &graph_segment,
const std::vector<AnfNodePtr> &inputs,
const std::vector<AnfNodePtr> &outputs);
private:
infer::abstract::Tensor *CreateTensor(AnfNodePtr node);
std::vector<infer::abstract::Tensor *> CreateTensors(const std::vector<AnfNodePtr> &nodes);
Status GetDTAndShapeFromParameter(ParameterPtr parameter, TypeId *data_type, ShapeVector *shape_vector);
Status GetDTAndShapeFromAbTensor(const abstract::AbstractTensorPtr &abstract, TypeId *data_type,
ShapeVector *shape_vector);
private:
mindspore::HashMap<AnfNodePtr, infer::abstract::Tensor *> anf_tensor_map_;
};
} // namespace mindspore
#endif // MINDSPORE_LITE_EXTENDRT_GRAPH_COMPILER_DEFAULT_GRAPH_COMPILER_H_

View File

@ -0,0 +1,37 @@
/**
* Copyright 2023 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "extendrt/graph_compiler/factory.h"
#include <functional>
#include <memory>
namespace mindspore {
GraphCompilerRegistry &GraphCompilerRegistry::GetInstance() {
static GraphCompilerRegistry instance;
return instance;
}
void GraphCompilerRegistry::RegCompiler(const mindspore::GraphCompilerType &type, const GraphCompilerRegFunc &creator) {
graph_compiler_map_[type] = creator;
}
std::shared_ptr<infer::abstract::GraphCompiler> GraphCompilerRegistry::GetCompiler(const mindspore::GraphCompilerType &type) {
auto it = graph_compiler_map_.find(type);
if (it == graph_compiler_map_.end()) {
return nullptr;
}
return it->second();
}
} // namespace mindspore

View File

@ -0,0 +1,55 @@
/**
* Copyright 2019-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_EXTENDRT_GRAPH_COMPILER_FACTORY_H_
#define MINDSPORE_LITE_EXTENDRT_GRAPH_COMPILER_FACTORY_H_
#include <functional>
#include <memory>
#include "extendrt/graph_compiler/type.h"
#include "infer/graph_compiler.h"
namespace mindspore {
using GraphCompiler = infer::abstract::GraphCompiler;
using GraphCompilerRegFunc = std::function<std::shared_ptr<GraphCompiler>()>;
class GraphCompilerRegistry {
public:
GraphCompilerRegistry() = default;
virtual ~GraphCompilerRegistry() = default;
static GraphCompilerRegistry &GetInstance();
void RegCompiler(const mindspore::GraphCompilerType &graph_compiler_type, const GraphCompilerRegFunc &creator);
std::shared_ptr<GraphCompiler> GetCompiler(const mindspore::GraphCompilerType &type);
private:
mindspore::HashMap<mindspore::GraphCompilerType, GraphCompilerRegFunc> graph_compiler_map_;
};
class GraphCompilerRegistrar {
public:
GraphCompilerRegistrar(const mindspore::GraphCompilerType &graph_compiler_type, const GraphCompilerRegFunc &creator) {
GraphCompilerRegistry::GetInstance().RegCompiler(graph_compiler_type, creator);
}
~GraphCompilerRegistrar() = default;
};
#define REG_GRAPH_COMPILER(type, creator) static GraphCompilerRegistrar g_##type##GraphCompiler(type, creator);
} // namespace mindspore
#endif // MINDSPORE_LITE_EXTENDRT_GRAPH_COMPILER_FACTORY_H_
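A brief sketch of how a concrete compiler is expected to hook into this registry and be resolved later (the creator lambda is illustrative; DefaultGraphCompiler and kDefaultCompiler come from other files in this commit):

REG_GRAPH_COMPILER(kDefaultCompiler, []() { return std::make_shared<DefaultGraphCompiler>(); });
// later, e.g. in DefaultInferSession::Init:
auto compiler = GraphCompilerRegistry::GetInstance().GetCompiler(kDefaultCompiler);
auto execution_plan = compiler->Compile(func_graph);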

View File

@ -0,0 +1,25 @@
/**
* Copyright 2023 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_EXTENDRT_GRAPH_COMPILER_TYPE_H_
#define MINDSPORE_LITE_EXTENDRT_GRAPH_COMPILER_TYPE_H_
#include <memory>
#include <vector>
namespace mindspore {
enum GraphCompilerType { kDefaultCompiler = 0, kSingleOpSession, kLiteInferSession, kDelegateSession, kNoneCompiler };
} // namespace mindspore
#endif // MINDSPORE_LITE_EXTENDRT_GRAPH_COMPILER_TYPE_H_

View File

@ -0,0 +1,120 @@
/**
* Copyright 2023 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "extendrt/graph_runtime/default_graph_runtime.h"
#include "extendrt/flow_executor.h"
#include "src/common/log.h"
namespace mindspore {
using ExecutionPlan = mindspore::infer::abstract::ExecutionPlan;
Status DefaultGraphRuntime::Prepare(std::shared_ptr<ExecutionPlan> execution_plan) {
MS_LOG(INFO) << "DefaultGraphRuntime::Prepare Begin";
if (execution_plan == nullptr) {
MS_LOG(ERROR) << "DefaultGraphRuntime::Prepare Execution Plan is nullptr.";
return kLiteNullptr;
}
execution_plan_ = execution_plan;
for (auto execution_flow : execution_plan->GetExecutionFlows()) {
auto executor = SelectExecutor(execution_flow);
if (executor == nullptr) {
MS_LOG(ERROR) << "DefaultGraphRuntime::Prepare Select Executor is nullptr.";
return kLiteNullptr;
}
MS_LOG(DEBUG) << "DefaultGraphRuntime::Prepare Prepare Execution Plan Begin of Executor " << executor->Name();
auto status = executor->Prepare(execution_flow);
if (status != kSuccess) {
MS_LOG(ERROR) << "DefaultGraphRuntime::Prepare Prepare Execution Plan Failed in Executor " << executor->Name();
return kLiteError;
}
MS_LOG(DEBUG) << "DefaultGraphRuntime::Prepare Prepare Execution Plan End";
}
MS_LOG(INFO) << "AbstractRuntime::Prepare End";
return kSuccess;
}
Status DefaultGraphRuntime::Execute() {
MS_LOG(INFO) << "DefaultGraphRuntime::Execute Begin";
if (execution_plan_ == nullptr) {
MS_LOG(ERROR) << "DefaultGraphRuntime::Execute Execution Plan is nullptr.";
return kLiteNullptr;
}
for (auto execution_flow : execution_plan_->GetExecutionFlows()) {
auto executor = SelectExecutor(execution_flow);
if (executor == nullptr) {
MS_LOG(ERROR) << "DefaultGraphRuntime::Execute Select Executor is nullptr.";
return kLiteNullptr;
}
MS_LOG(DEBUG) << "DefaultGraphRuntime::Execute Execution Plan Begin of Executor " << executor->Name();
auto status = executor->Execute();
if (status != kSuccess) {
MS_LOG(ERROR) << "DefaultGraphRuntime::Execute Execution Plan Failed in Executor " << executor->Name();
return kLiteError;
}
MS_LOG(DEBUG) << "DefaultGraphRuntime::Execute Prepare Execution Plan End";
}
MS_LOG(INFO) << "DefaultGraphRuntime::Execute End";
return kSuccess;
}
Status DefaultGraphRuntime::Execute(const std::vector<abstract::Tensor *> &inputs,
const std::vector<abstract::Tensor *> &outputs, abstract::KernelCallBack before,
abstract::KernelCallBack after) {
MS_LOG(INFO) << "DefaultGraphRuntime::Execute Begin";
if (execution_plan_ == nullptr) {
MS_LOG(ERROR) << "DefaultGraphRuntime::Execute Execution Plan is nullptr.";
return kLiteNullptr;
}
for (auto &execution_flow : execution_plan_->GetExecutionFlows()) {
auto executor = SelectExecutor(execution_flow);
if (executor == nullptr) {
MS_LOG(ERROR) << "DefaultGraphRuntime::Execute Select Executor is nullptr.";
return kLiteNullptr;
}
MS_LOG(DEBUG) << "DefaultGraphRuntime::Execute Execution Plan Begin of Executor " << executor->Name();
execution_flow->SetInputs(inputs);
execution_flow->SetOutputs(outputs);
execution_flow->SetKernelBeforeCallBack(before);
execution_flow->SetKernelAfterCallBack(after);
auto status = executor->Execute();
if (status != kSuccess) {
MS_LOG(ERROR) << "DefaultGraphRuntime::Execute Execution Plan Failed in Executor " << executor->Name();
return kLiteError;
}
MS_LOG(DEBUG) << "DefaultGraphRuntime::Execute Prepare Execution Plan End";
}
MS_LOG(INFO) << "DefaultGraphRuntime::Execute End";
return kSuccess;
}
std::shared_ptr<abstract::Executor> DefaultGraphRuntime::SelectExecutor(
const std::shared_ptr<abstract::ExecutionFlow> &execution_flow) {
auto it = executor_map_.find(execution_flow);
if (it == executor_map_.end()) {
// create a new executor for execution flow
auto executor = std::make_shared<infer::FlowExecutor>("flow-executor");
executor_map_[execution_flow] = executor;
return executor;
}
return it->second;
}
} // namespace mindspore

View File

@ -0,0 +1,46 @@
/**
* Copyright 2023 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_EXTENDRT_GRAPH_RUNTIME_DEFAULT_GRAPH_RUNTIME_H_
#define MINDSPORE_LITE_SRC_EXTENDRT_GRAPH_RUNTIME_DEFAULT_GRAPH_RUNTIME_H_
#include <vector>
#include <memory>
#include "infer/graph_runtime.h"
namespace mindspore {
class DefaultGraphRuntime : public mindspore::infer::abstract::GraphRuntime {
public:
DefaultGraphRuntime() = default;
virtual ~DefaultGraphRuntime() = default;
Status Prepare(std::shared_ptr<abstract::ExecutionPlan> execution_plan) override;
Status Execute() override;
Status Execute(const std::vector<abstract::Tensor *> &inputs, const std::vector<abstract::Tensor *> &outputs,
abstract::KernelCallBack before = nullptr, abstract::KernelCallBack after = nullptr) override;
private:
std::shared_ptr<abstract::Executor> SelectExecutor(const std::shared_ptr<abstract::ExecutionFlow> &execution_flow);
private:
std::shared_ptr<abstract::ExecutionPlan> execution_plan_ = nullptr;
mindspore::HashMap<std::shared_ptr<abstract::ExecutionFlow>, std::shared_ptr<abstract::Executor>> executor_map_;
};
} // namespace mindspore
#endif // MINDSPORE_LITE_SRC_EXTENDRT_GRAPH_RUNTIME_DEFAULT_GRAPH_RUNTIME_H_

View File

@ -0,0 +1,37 @@
/**
* Copyright 2019-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "extendrt/graph_runtime/factory.h"
#include <functional>
#include <memory>
namespace mindspore {
GraphRuntimeRegistry &GraphRuntimeRegistry::GetInstance() {
static GraphRuntimeRegistry instance;
return instance;
}
void GraphRuntimeRegistry::RegRuntime(const mindspore::GraphRuntimeType &type, const GraphRuntimeRegFunc &creator) {
graph_runtime_map_[type] = creator;
}
std::shared_ptr<infer::abstract::GraphRuntime> GraphRuntimeRegistry::GetRuntime(const mindspore::GraphRuntimeType &type) {
auto it = graph_runtime_map_.find(type);
if (it == graph_runtime_map_.end()) {
return nullptr;
}
return it->second();
}
} // namespace mindspore

View File

@ -0,0 +1,55 @@
/**
* Copyright 2019-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_EXTENDRT_GRAPH_RUNTIME_FACTORY_H_
#define MINDSPORE_LITE_SRC_EXTENDRT_GRAPH_RUNTIME_FACTORY_H_
#include <functional>
#include <memory>
#include "extendrt/graph_runtime/type.h"
#include "infer/graph_runtime.h"
namespace mindspore {
using GraphRuntime = infer::abstract::GraphRuntime;
using GraphRuntimeRegFunc = std::function<std::shared_ptr<GraphRuntime>()>;
class GraphRuntimeRegistry {
public:
GraphRuntimeRegistry() = default;
virtual ~GraphRuntimeRegistry() = default;
static GraphRuntimeRegistry &GetInstance();
void RegRuntime(const GraphRuntimeType &type, const GraphRuntimeRegFunc &creator);
std::shared_ptr<GraphRuntime> GetRuntime(const mindspore::GraphRuntimeType &type);
private:
mindspore::HashMap<GraphRuntimeType, GraphRuntimeRegFunc> graph_runtime_map_;
};
class GraphRuntimeRegistrar {
public:
GraphRuntimeRegistrar(const mindspore::GraphRuntimeType &type, const GraphRuntimeRegFunc &creator) {
GraphRuntimeRegistry::GetInstance().RegRuntime(type, creator);
}
~GraphRuntimeRegistrar() = default;
};
#define REG_GRAPH_RUNTIME(type, creator) static GraphRuntimeRegistrar g_##type##GraphRuntime(type, creator);
} // namespace mindspore
#endif // MINDSPORE_LITE_SRC_EXTENDRT_GRAPH_RUNTIME_FACTORY_H_
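The runtime registry mirrors the compiler registry; a hedged registration/lookup sketch (creator lambda illustrative; DefaultGraphRuntime and kDefaultRuntime are defined elsewhere in this commit):

REG_GRAPH_RUNTIME(kDefaultRuntime, []() { return std::make_shared<DefaultGraphRuntime>(); });
auto runtime = GraphRuntimeRegistry::GetInstance().GetRuntime(kDefaultRuntime);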

View File

@ -0,0 +1,25 @@
/**
* Copyright 2019-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_EXTENDRT_GRAPH_RUNTIME_TYPE_H_
#define MINDSPORE_LITE_EXTENDRT_GRAPH_RUNTIME_TYPE_H_
#include <memory>
#include <vector>
namespace mindspore {
enum GraphRuntimeType { kDefaultRuntime = 0, kSingleOpSession, kLiteInferSession, kDelegateSession, kNoneRuntime };
} // namespace mindspore
#endif // MINDSPORE_LITE_EXTENDRT_GRAPH_RUNTIME_TYPE_H_

View File

@ -30,61 +30,6 @@
#include "extendrt/delegate/plugin/ascend_ge_executor_plugin.h"
namespace mindspore {
static const std::vector<PrimitivePtr> ms_infer_cut_list = {prim::kPrimReturn, prim::kPrimPartial,
prim::kPrimSwitch, prim::kPrimMakeTuple,
prim::kPrimBpropCut, prim::kPrimSwitchLayer};
static bool is_infer_single_op = true;
static bool is_use_lite_session = false;
/// \brief Default Infer Session Implementation, using kernelmod, not implemented now.
class DefaultInferSession : public InferSession {
public:
explicit DefaultInferSession(const std::shared_ptr<Context> &context) {}
virtual ~DefaultInferSession() = default;
Status Init(const std::shared_ptr<Context> &context) override;
Status CompileGraph(FuncGraphPtr graph, const void *data = nullptr, size_t size = 0) override;
Status RunGraph(const std::vector<tensor::Tensor> &inputs, std::vector<tensor::Tensor> *outputs) override;
Status RunGraph(const std::vector<tensor::Tensor> &inputs, std::vector<tensor::Tensor> *outputs,
const MSKernelCallBack &before, const MSKernelCallBack &after) override;
std::vector<MutableTensorImplPtr> GetOutputs() override;
std::vector<MutableTensorImplPtr> GetInputs() override;
std::vector<std::string> GetOutputNames() override;
std::vector<std::string> GetInputNames() override;
MutableTensorImplPtr GetOutputByTensorName(const std::string &tensorName) override;
MutableTensorImplPtr GetInputByTensorName(const std::string &name) override;
private:
KernelGraphUtilsPtr kernel_graph_utils_;
KernelGraphPtr kernel_graph_;
std::vector<KernelGraphPtr> kernel_graphs_;
};
Status DefaultInferSession::Init(const std::shared_ptr<Context> &context) {
MS_LOG(INFO) << "DefaultInferSession::Init";
kernel_graph_utils_ = std::make_shared<mindspore::KernelGraphUtils>();
partition_ = std::make_shared<compile::GraphPartition>(ms_infer_cut_list, "ms");
return kSuccess;
}
Status DefaultInferSession::CompileGraph(FuncGraphPtr graph, const void *data, size_t size) {
MS_LOG(INFO) << "DefaultInferSession::CompileGraph";
return kSuccess;
}
Status DefaultInferSession::RunGraph(const std::vector<tensor::Tensor> &inputs, std::vector<tensor::Tensor> *outputs,
const MSKernelCallBack &before, const MSKernelCallBack &after) {
return kSuccess;
}
Status DefaultInferSession::RunGraph(const std::vector<tensor::Tensor> &inputs, std::vector<tensor::Tensor> *outputs) {
return kSuccess;
}
std::vector<MutableTensorImplPtr> DefaultInferSession::GetOutputs() { return {}; }
std::vector<MutableTensorImplPtr> DefaultInferSession::GetInputs() { return {}; }
std::vector<std::string> DefaultInferSession::GetOutputNames() { return std::vector<std::string>(); }
std::vector<std::string> DefaultInferSession::GetInputNames() { return std::vector<std::string>(); }
MutableTensorImplPtr DefaultInferSession::GetOutputByTensorName(const std::string &tensorName) { return nullptr; }
MutableTensorImplPtr DefaultInferSession::GetInputByTensorName(const std::string &name) { return nullptr; }
std::shared_ptr<InferSession> InferSession::CreateSession(const std::shared_ptr<Context> &context,
const ConfigInfos &config_info) {
HandleContext(context);
@ -148,6 +93,10 @@ void InferSession::HandleContext(const std::shared_ptr<Context> &context) {
}
continue;
}
if (device_info->GetDeviceType() == kAllDevice) {
// Auto Device: MSLite will detect the available devices and run the graph/sub-graphs on suitable devices via its scheduler
continue;
}
}
}
@ -165,23 +114,12 @@ SessionType InferSession::SelectSession(const std::shared_ptr<Context> &context)
if (device_context->GetDeviceType() == kGPU || device_context->GetDeviceType() == kCPU) {
return kDelegateSession;
}
if (device_context->GetDeviceType() == kAllDevice) {
// Default Session support auto device context
return kDefaultSession;
}
}
if (is_infer_single_op) {
return kSingleOpSession;
}
if (is_use_lite_session) {
return kLiteInferSession;
}
return kDefaultSession;
}
static std::shared_ptr<InferSession> DefaultSessionCreator(const std::shared_ptr<Context> &ctx,
const ConfigInfos &config_infos) {
auto session = std::make_shared<DefaultInferSession>(ctx);
session->Init(ctx);
return session;
}
REG_SESSION(kDefaultSession, DefaultSessionCreator);
} // namespace mindspore

View File

@ -135,8 +135,6 @@ class InferSession : public std::enable_shared_from_this<InferSession> {
// FuncGraph pointer for model.
FuncGraphPtr graph_;
// Graph Partition Manager for control flow, not implemented.
compile::GraphPartitionPtr partition_;
}; // namespace mindspore
} // namespace mindspore
#endif

View File

@ -0,0 +1,244 @@
/**
* Copyright 2019-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <utility>
#include "extendrt/session/default_session.h"
#include "plugin/factory/ms_factory.h"
#include "extendrt/session/factory.h"
#include "extendrt/graph_compiler/factory.h"
#include "extendrt/graph_runtime/factory.h"
#include "backend/graph_compiler/graph_partition.h"
#include "litert/cxx_api/tensor/tensor_impl.h"
namespace mindspore {
static const std::vector<PrimitivePtr> ms_infer_cut_list = {prim::kPrimReturn, prim::kPrimPartial,
prim::kPrimSwitch, prim::kPrimMakeTuple,
prim::kPrimBpropCut, prim::kPrimSwitchLayer};
Status DefaultInferSession::Init(const std::shared_ptr<Context> &context) {
MS_LOG(INFO) << "DefaultInferSession::Init";
context_ = context;
// Set MSContext::GetInstance param?
// init compiler and runtime according to context
compiler_ = GraphCompilerRegistry::GetInstance().GetCompiler(kDefaultCompiler);
if (compiler_ == nullptr) {
MS_LOG(ERROR) << "DefaultInferSession::Init Get Compiler is nullptr";
return kLiteNullptr;
}
runtime_ = GraphRuntimeRegistry::GetInstance().GetRuntime(kDefaultRuntime);
if (runtime_ == nullptr) {
MS_LOG(ERROR) << "DefaultInferSession::Init Get Runtime is nullptr";
return kLiteNullptr;
}
return kSuccess;
}
Status DefaultInferSession::CompileGraph(FuncGraphPtr graph, const void *data, size_t size) {
MS_LOG(INFO) << "DefaultInferSession::CompileGraph";
MS_LOG(DEBUG) << "DefaultInferSession::CompileGraph Compile Graph Begin";
auto compiler = this->GetCompiler();
if (compiler == nullptr) {
MS_LOG(ERROR) << "DefaultInferSession::CompileGraph Compiler in Infer Session is null";
return kLiteNullptr;
}
auto execution_plan = compiler->Compile(graph);
if (execution_plan == nullptr) {
MS_LOG(ERROR) << "DefaultInferSession::CompileGraph Compile Graph Failed, Execution plan is null";
return kLiteNullptr;
}
MS_LOG(DEBUG) << "DefaultInferSession::CompileGraph Compile Graph End";
MS_LOG(DEBUG) << "DefaultInferSession::CompileGraph Prepare ExecutionPlan Begin";
auto runtime = this->GetRuntime();
if (runtime == nullptr) {
MS_LOG(ERROR) << "DefaultInferSession::CompileGraph Runtime in Infer Session is null";
return kLiteNullptr;
}
auto status = runtime->Prepare(execution_plan);
if (status != kSuccess) {
MS_LOG(ERROR) << "DefaultInferSession::CompileGraph Prepare Execution Plan Failed";
return status;
}
MS_LOG(DEBUG) << "DefaultInferSession::CompileGraph Prepare ExecutionPlan End";
return kSuccess;
}
Status DefaultInferSession::RunGraph(const std::vector<tensor::Tensor> &inputs, std::vector<tensor::Tensor> *outputs,
const MSKernelCallBack &before, const MSKernelCallBack &after) {
MS_LOG(DEBUG) << "DefaultInferSession::RunGraph Execute ExecutionPlan Begin";
auto runtime = this->GetRuntime();
if (runtime == nullptr) {
MS_LOG(ERROR) << "DefaultInferSession::RunGraph Runtime in Infer Session is null";
return kLiteNullptr;
}
// Convert tensor::Tensor to lite::Tensor, see litert cxx_api model
// auto inner_inputs = xxx(inputs);
// auto inner_outputs = xxx(outputs);
auto inner_inputs = runtime->GetInputs();
auto inner_outputs = runtime->GetOutputs();
auto status = CopyDataToInnerTensors(inputs, inner_inputs);
if (status != kSuccess) {
MS_LOG(ERROR) << "DefaultInferSession::RunGraph Copy Data Pointer to input tensors failed";
return status;
}
status = CopyDataToInnerTensors(*outputs, inner_outputs);
if (status != kSuccess) {
MS_LOG(ERROR) << "DefaultInferSession::RunGraph Copy Data Pointer to output tensors failed";
return status;
}
status = runtime->Execute(inner_inputs, inner_outputs);
if (status != kSuccess) {
MS_LOG(ERROR) << "DefaultInferSession::RunGraph Execute Execution Plan Failed";
return status;
}
*outputs = LiteTensorToTensor(inner_outputs);
if (outputs->size() != inner_outputs.size()) {
MS_LOG(ERROR) << "DefaultInferSession::RunGraph Convert output tensors failed";
return kLiteNullptr;
}
MS_LOG(DEBUG) << "DefaultInferSession::RunGraph Execute ExecutionPlan End";
return kSuccess;
}
Status DefaultInferSession::RunGraph(const std::vector<tensor::Tensor> &inputs, std::vector<tensor::Tensor> *outputs) {
return RunGraph(inputs, outputs, nullptr, nullptr);
}
std::vector<MutableTensorImplPtr> DefaultInferSession::GetOutputs() {
auto runtime = this->GetRuntime();
if (runtime == nullptr) {
MS_LOG(ERROR) << "DefaultInferSession::GetOutputs Runtime in Infer Session is null";
return {};
}
auto lite_outputs = runtime->GetOutputs();
MS_LOG(DEBUG) << "DefaultInferSession::GetOutputs end";
return AbstractTensorsToTensorImpls(lite_outputs);
}
std::vector<MutableTensorImplPtr> DefaultInferSession::GetInputs() {
auto runtime = this->GetRuntime();
if (runtime == nullptr) {
MS_LOG(ERROR) << "DefaultInferSession::GetInputs Runtime in Infer Session is null";
return {};
}
auto lite_inputs = runtime->GetInputs();
MS_LOG(DEBUG) << "DefaultInferSession::GetInputs end";
return AbstractTensorsToTensorImpls(lite_inputs);
}
std::vector<std::string> DefaultInferSession::GetOutputNames() { return std::vector<std::string>(); }
std::vector<std::string> DefaultInferSession::GetInputNames() { return std::vector<std::string>(); }
MutableTensorImplPtr DefaultInferSession::GetOutputByTensorName(const std::string &tensorName) { return nullptr; }
MutableTensorImplPtr DefaultInferSession::GetInputByTensorName(const std::string &name) { return nullptr; }
Status DefaultInferSession::CopyDataToInnerTensors(const std::vector<tensor::Tensor> &tensors,
std::vector<abstract::Tensor *> inner_tensors) {
if (tensors.size() != inner_tensors.size()) {
MS_LOG(EXCEPTION) << "user input size " << tensors.size() << " is not equal to graph input size "
<< inner_tensors.size();
}
std::vector<void *> old_data;
for (size_t i = 0; i < tensors.size(); i++) {
auto &user_input = tensors.at(i);
auto input = inner_tensors.at(i);
if (user_input.data_type() != input->data_type()) {
// ResetTensorData(old_data, input_tensors);
MS_LOG(EXCEPTION) << "Tensor " << user_input.id() << " has a different data type from input"
<< input->tensor_name() << ".";
}
if (user_input.data_c() == nullptr) {
// ResetTensorData(old_data, input_tensors);
MS_LOG(EXCEPTION) << "Tensor " << user_input.id() << " has no data.";
}
old_data.push_back(input->data());
if (input->data_type() == kObjectTypeString) {
std::vector<int32_t> shape =
TruncateShape(user_input.shape_c(), input->data_type(), user_input.DataSize(), false);
if (shape.empty() && !(user_input.shape_c().empty())) {
// ResetTensorData(old_data, input_tensors);
MS_LOG(EXCEPTION) << "Input dims of tensor " << user_input.id() << " is invalid.";
}
input->set_shape(shape);
input->set_data(user_input.data_c(), false);
} else {
if (user_input.data_c() != input->data()) {
if (input->Size() != user_input.Size()) {
// ResetTensorData(old_data, input_tensors);
#ifndef ENABLE_LITE_ACL
MS_LOG(EXCEPTION) << "Tensor " << user_input.id() << " has wrong data size.";
#else
MS_LOG(WARNING) << "Please check tensor " << user_input.id()
<< " has been modified data size by DVPP method.";
std::vector<int> truncate_shape = {static_cast<int>(user_input.DataSize())};
input->set_shape(truncate_shape);
#endif
}
input->set_data(user_input.data_c(), false);
}
}
}
return kSuccess;
}
std::vector<MutableTensorImplPtr> DefaultInferSession::AbstractTensorsToTensorImpls(
const std::vector<abstract::Tensor *> &abstract_tensors) {
std::vector<MutableTensorImplPtr> tensorImpls;
tensorImpls.reserve(abstract_tensors.size());
(void)std::transform(abstract_tensors.begin(), abstract_tensors.end(), std::back_inserter(tensorImpls),
[](abstract::Tensor *tensor) { return std::make_shared<LiteTensorImpl>(tensor); });
return tensorImpls;
}
std::vector<mindspore::tensor::Tensor> DefaultInferSession::LiteTensorToTensor(
const std::vector<abstract::Tensor *> &abstract_tensors) {
std::vector<mindspore::tensor::Tensor> tensors;
for (auto abstract_tensor : abstract_tensors) {
if (abstract_tensor == nullptr) {
MS_LOG(ERROR) << "DefaultInferSession::LiteTensorToTensor get nullptr tensor";
return std::vector<mindspore::tensor::Tensor>{};
}
auto type_id = abstract_tensor->data_type();
auto shape = abstract_tensor->shape();
auto data = abstract_tensor->MutableData();
auto data_size = abstract_tensor->Size();
auto ref_tensor_data =
std::make_shared<TensorRefData>(data, abstract_tensor->ElementNum(), data_size, shape.size());
mindspore::tensor::Tensor tensor(type_id, shape, ref_tensor_data);
auto device_address = abstract_tensor->device_data();
if (device_address != nullptr) {
auto lite_device_address = std::make_shared<LiteDeviceAddress>(device_address, abstract_tensor->DataSize());
tensor.set_device_address(lite_device_address);
}
tensors.emplace_back(std::move(tensor));
}
return tensors;
}
static std::shared_ptr<InferSession> DefaultSessionCreator(const std::shared_ptr<Context> &ctx,
const ConfigInfos &config_infos) {
auto session = std::make_shared<DefaultInferSession>(ctx);
session->Init(ctx);
return session;
}
REG_SESSION(kDefaultSession, DefaultSessionCreator);
} // namespace mindspore
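Taken together, the intended end-to-end path through the new unified framework is roughly the following sketch (names are from this commit; error handling and input tensor preparation elided):

// 1. A context containing AutoDeviceInfo (kAllDevice) makes SelectSession return kDefaultSession
auto session = InferSession::CreateSession(context, config_infos);
// 2. CompileGraph: GraphCompiler::Compile produces an ExecutionPlan,
//    then GraphRuntime::Prepare binds each ExecutionFlow to a FlowExecutor
session->CompileGraph(func_graph);
// 3. RunGraph copies user tensors into the plan's tensors, executes every flow, and converts the results back
std::vector<tensor::Tensor> outputs;
session->RunGraph(inputs, &outputs);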

View File

@ -0,0 +1,67 @@
/**
* Copyright 2019-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_EXTENDRT_SESSION_DEFAULT_SESSION_H_
#define MINDSPORE_LITE_EXTENDRT_SESSION_DEFAULT_SESSION_H_
#include <vector>
#include <string>
#include <memory>
#include <map>
#include "extendrt/infer_session.h"
#include "infer/graph_compiler.h"
#include "infer/graph_runtime.h"
namespace mindspore {
/// \brief Default Infer Session Implementation, using kernelmod, not implemented now.
class DefaultInferSession : public InferSession {
public:
explicit DefaultInferSession(const std::shared_ptr<Context> &context) { context_ = context; }
virtual ~DefaultInferSession() = default;
Status Init(const std::shared_ptr<Context> &context) override;
Status CompileGraph(FuncGraphPtr graph, const void *data = nullptr, size_t size = 0) override;
Status RunGraph(const std::vector<tensor::Tensor> &inputs, std::vector<tensor::Tensor> *outputs) override;
Status RunGraph(const std::vector<tensor::Tensor> &inputs, std::vector<tensor::Tensor> *outputs,
const MSKernelCallBack &before, const MSKernelCallBack &after) override;
std::vector<MutableTensorImplPtr> GetOutputs() override;
std::vector<MutableTensorImplPtr> GetInputs() override;
std::vector<std::string> GetOutputNames() override;
std::vector<std::string> GetInputNames() override;
MutableTensorImplPtr GetOutputByTensorName(const std::string &tensorName) override;
MutableTensorImplPtr GetInputByTensorName(const std::string &name) override;
protected:
virtual std::shared_ptr<infer::abstract::GraphCompiler> GetCompiler() { return compiler_; }
virtual std::shared_ptr<infer::abstract::GraphRuntime> GetRuntime() { return runtime_; }
private:
Status CopyDataToInnerTensors(const std::vector<tensor::Tensor> &tensors,
std::vector<abstract::Tensor *> inner_tensors);
std::vector<MutableTensorImplPtr> AbstractTensorsToTensorImpls(
const std::vector<abstract::Tensor *> &abstract_tensors);
std::vector<mindspore::tensor::Tensor> LiteTensorToTensor(const std::vector<abstract::Tensor *> &abstract_tensors);
private:
std::shared_ptr<infer::abstract::GraphCompiler> compiler_;
std::shared_ptr<infer::abstract::GraphRuntime> runtime_;
std::shared_ptr<Context> context_;
};
} // namespace mindspore
#endif // MINDSPORE_LITE_EXTENDRT_SESSION_DEFAULT_SESSION_H_

View File

@ -0,0 +1,31 @@
/**
* Copyright 2023 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_INFER_CONTEXT_H_
#define MINDSPORE_LITE_INFER_CONTEXT_H_
#include <memory>
#include "litert/inner_context.h"
namespace mindspore::infer::abstract {
using Context = mindspore::lite::InnerContext;
// class Context : public std::enable_shared_from_this<Context> {
// public:
// virtual ~Context() = default;
// };
} // namespace mindspore::infer::abstract
#endif // MINDSPORE_LITE_INFER_CONTEXT_H_

View File

@ -0,0 +1,105 @@
/**
* Copyright 2023 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_INFER_EXECUTION_FLOW_H_
#define MINDSPORE_LITE_INFER_EXECUTION_FLOW_H_
#include <vector>
#include <memory>
#include "infer/context.h"
#include "infer/kernel.h"
#include "infer/kernel_callback.h"
namespace mindspore::infer::abstract {
class ExecutionFlow : public std::enable_shared_from_this<ExecutionFlow> {
public:
virtual ~ExecutionFlow() = default;
/// \brief Get list of kernel need to run.
///
/// \return vector of Kernel.
virtual std::vector<Kernel *> GetKernels() = 0;
/// \brief Set list of kernel need to run.
///
/// \param[in] kernels, list of kernels
///
/// \return void.
virtual void SetKernels(const std::vector<Kernel *> &kernels) = 0;
/// \brief Get list of inputs for the execution flow.
///
/// \return vector of Tensor.
virtual std::vector<Tensor *> GetInputs() = 0;
/// \brief Set input tensors need to run.
///
/// \param[in] inputs, list of input tensor
///
/// \return void.
virtual void SetInputs(const std::vector<Tensor *> &inputs) = 0;
/// \brief Get list of outputs for the execution flow.
///
/// \return vector of Tensor.
virtual std::vector<Tensor *> GetOutputs() = 0;
/// \brief Set the output tensors for the execution flow.
///
/// \param[in] outputs, list of output tensors
///
/// \return void.
virtual void SetOutputs(const std::vector<Tensor *> &outputs) = 0;
/// \brief Get context for the execution flow.
///
/// \return Context pointer.
virtual Context *GetContext() = 0;
/// \brief Set the context for the execution flow.
///
/// \param[in] context, context for running
///
/// \return void.
virtual void SetContext(Context *context) = 0;
/// \brief Get callback before kernel execution.
///
/// \return KernelCallBack pointer.
virtual const KernelCallBack &GetKernelBeforeCallBack() = 0;
/// \brief Set callback before kernel execution.
///
/// \param[in] callback, callback function pointer
///
/// \return void.
virtual void SetKernelBeforeCallBack(const KernelCallBack &callback) = 0;
/// \brief Get callback after kernel execution.
///
/// \return KernelCallBack reference.
virtual const KernelCallBack &GetKernelAfterCallBack() = 0;
/// \brief Set callback after kernel execution.
///
/// \param[in] callback, callback function pointer
///
/// \return void.
virtual void SetKernelAfterCallBack(const KernelCallBack &callback) = 0;
};
} // namespace mindspore::infer::abstract
#endif // MINDSPORE_LITE_INFER_EXECUTION_FLOW_H_
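To make the contract concrete, a hedged sketch of an executor-style loop over a flow's kernels follows; RunFlowSketch is illustrative only, and a real executor would also invoke the before/after callbacks with each kernel's tensors.
// Hedged sketch: walk the kernels of an ExecutionFlow in order (Kernel aliases kernel::KernelExec).
int RunFlowSketch(const std::shared_ptr<infer::abstract::ExecutionFlow> &flow) {
  const auto &before = flow->GetKernelBeforeCallBack();  // available for per-kernel tracing
  const auto &after = flow->GetKernelAfterCallBack();
  (void)before;
  (void)after;
  for (auto *kernel : flow->GetKernels()) {
    if (kernel == nullptr) {
      return -1;
    }
    auto ret = kernel->Execute();  // run the kernel with its bound in/out tensors
    if (ret != 0) {                // lite kernels return RET_OK (0) on success
      return ret;
    }
  }
  return 0;
}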

View File

@ -0,0 +1,87 @@
/**
* Copyright 2023 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_INFER_EXECUTION_PLAN_H_
#define MINDSPORE_LITE_INFER_EXECUTION_PLAN_H_
#include <vector>
#include <memory>
#include "ir/func_graph.h"
#include "infer/execution_flow.h"
namespace mindspore::infer::abstract {
class ExecutionPlan : public std::enable_shared_from_this<ExecutionPlan> {
public:
virtual ~ExecutionPlan() = default;
/// \brief Get the list of execution flows in the execution plan.
///
/// \return vector of ExecutionFlow.
virtual std::vector<std::shared_ptr<ExecutionFlow>> GetExecutionFLows() = 0;
/// \brief Set Execution Flows for the execution plan.
///
/// \param[in] execution_flows, the list of execution flows to run
///
/// \return void.
virtual void SetExecutionFlows(std::vector<std::shared_ptr<ExecutionFlow>> execution_flows) = 0;
/// \brief Add an Execution Flow at the end of the execution plan.
///
/// \param[in] execution_flow, the execution flow to add
///
/// \return void.
virtual void AddExecutionFlow(std::shared_ptr<ExecutionFlow> execution_flow) = 0;
/// \brief Get the FuncGraph of the model to run.
///
/// \return FuncGraph pointer.
virtual FuncGraphPtr GetFuncGraph() = 0;
/// \brief Set FuncGraph for the execution plan.
///
/// \param[in] func_graph, the graph to run
///
/// \return void.
virtual void SetFuncGraph(FuncGraphPtr func_graph) = 0;
/// \brief Get list of inputs for the model.
///
/// \return vector of Tensor.
virtual std::vector<Tensor *> GetInputs() = 0;
/// \brief Set the input tensors for the model.
///
/// \param[in] inputs, list of input tensors
///
/// \return void.
virtual void SetInputs(const std::vector<Tensor *> &inputs) = 0;
/// \brief Get list of outputs for the model.
///
/// \return vector of Tensor.
virtual std::vector<Tensor *> GetOutputs() = 0;
/// \brief Set the output tensors for the model.
///
/// \param[in] outputs, list of output tensors
///
/// \return void.
virtual void SetOutputs(const std::vector<Tensor *> &outputs) = 0;
};
} // namespace mindspore::infer::abstract
#endif // MINDSPORE_LITE_INFER_EXECUTION_PLAN_H_
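A short, hedged construction example tying ExecutionFlow and ExecutionPlan together; MyExecutionFlow and MyExecutionPlan are hypothetical concrete classes, since this commit only declares the interfaces.
// Hedged sketch: build a single-flow plan from already-scheduled kernels.
std::shared_ptr<infer::abstract::ExecutionPlan> BuildPlanSketch(
    FuncGraphPtr func_graph, const std::vector<infer::abstract::Kernel *> &kernels,
    const std::vector<infer::abstract::Tensor *> &inputs,
    const std::vector<infer::abstract::Tensor *> &outputs, infer::abstract::Context *context) {
  auto flow = std::make_shared<MyExecutionFlow>();  // hypothetical ExecutionFlow implementation
  flow->SetKernels(kernels);
  flow->SetInputs(inputs);
  flow->SetOutputs(outputs);
  flow->SetContext(context);

  auto plan = std::make_shared<MyExecutionPlan>();  // hypothetical ExecutionPlan implementation
  plan->SetFuncGraph(func_graph);
  plan->AddExecutionFlow(flow);  // multi-flow plans would append further flows here
  plan->SetInputs(inputs);
  plan->SetOutputs(outputs);
  return plan;
}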

View File

@ -0,0 +1,58 @@
/**
* Copyright 2023 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_INFER_EXECUTOR_H_
#define MINDSPORE_LITE_INFER_EXECUTOR_H_
#include <string>
#include <memory>
#include <vector>
#include "include/api/status.h"
#include "infer/execution_flow.h"
namespace mindspore::infer::abstract {
class Executor : public std::enable_shared_from_this<Executor> {
public:
virtual ~Executor() = default;
/// \brief The Name of the Executor.
///
/// \return String name of executor.
virtual const std::string &Name() = 0;
/// \brief Prepare Execution According to ExecutionFlow.
///
/// \param[in] execution_flow Abstract Execution Flow to execute.
///
/// \return Status.
virtual Status Prepare(std::shared_ptr<ExecutionFlow> execution_flow) = 0;
/// \brief Execute According to ExecutionFlow.
///
/// \return Status.
virtual Status Execute() = 0;
/// \brief Resize Executor Kernels.
///
/// \param[in] inputs, input tensors to resize
/// \param[in] dims, target shapes for the input tensors
///
/// \return int status code.
virtual int Resize(const std::vector<Tensor *> &inputs, const std::vector<std::vector<int>> &dims) = 0;
};
} // namespace mindspore::infer::abstract
#endif // MINDSPORE_LITE_INFER_EXECUTOR_H_
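A hedged usage sketch of the Executor contract; MyExecutor is a hypothetical implementation, and treating 0 as success for Resize mirrors the lite RET_OK convention rather than anything this header guarantees.
// Hedged sketch: prepare, optionally resize, then execute a flow.
Status RunExecutorSketch(const std::shared_ptr<infer::abstract::ExecutionFlow> &flow,
                         const std::vector<std::vector<int>> &new_shapes) {
  std::shared_ptr<infer::abstract::Executor> executor = std::make_shared<MyExecutor>();
  auto ret = executor->Prepare(flow);  // bind the flow's kernels and tensors
  if (ret != kSuccess) {
    return ret;
  }
  if (!new_shapes.empty() && executor->Resize(flow->GetInputs(), new_shapes) != 0) {
    return kLiteError;  // re-running shape inference failed after an input resize
  }
  return executor->Execute();  // run every kernel in order
}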

View File

@ -0,0 +1,37 @@
/**
* Copyright 2023 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_INFER_GRAPH_COMPILER_H_
#define MINDSPORE_LITE_INFER_GRAPH_COMPILER_H_
#include <memory>
#include "infer/execution_plan.h"
namespace mindspore::infer::abstract {
class GraphCompiler : public std::enable_shared_from_this<GraphCompiler> {
public:
virtual ~GraphCompiler() = default;
/// \brief Compile FuncGraph Into ExecutionPlan.
///
/// \param[in] graph FuncGraph to compile.
///
/// \return ExecutionPlan pointer.
virtual std::shared_ptr<ExecutionPlan> Compile(FuncGraphPtr graph) = 0;
};
} // namespace mindspore::infer::abstract
#endif // MINDSPORE_LITE_INFER_GRAPH_COMPILER_H_
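A skeleton of what a concrete compiler might look like; the class name, the concrete plan type, and the scheduling helper are assumptions, since this commit only declares the interface.
// Hedged sketch: lower a FuncGraph into a single-flow ExecutionPlan.
class SketchGraphCompiler : public infer::abstract::GraphCompiler {
 public:
  std::shared_ptr<infer::abstract::ExecutionPlan> Compile(FuncGraphPtr graph) override {
    if (graph == nullptr) {
      return nullptr;
    }
    auto plan = std::make_shared<MyExecutionPlan>();  // hypothetical concrete plan
    plan->SetFuncGraph(graph);
    // A real compiler would select kernels for each node here and partition them into flows.
    plan->AddExecutionFlow(ScheduleToFlowSketch(graph));  // assumed scheduling helper
    return plan;
  }
};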

View File

@ -0,0 +1,65 @@
/**
* Copyright 2023 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_INFER_GRAPH_RUNTIME_H_
#define MINDSPORE_LITE_INFER_GRAPH_RUNTIME_H_
#include <vector>
#include <memory>
#include "include/api/status.h"
#include "infer/executor.h"
#include "infer/execution_plan.h"
#include "infer/kernel_callback.h"
namespace mindspore::infer::abstract {
class GraphRuntime : public std::enable_shared_from_this<GraphRuntime> {
public:
virtual ~GraphRuntime() = default;
/// \brief Prepare Execution According to ExecutionPlan.
///
/// \param[in] execution_plan Abstract Execution Plan to execute.
///
/// \return Status.
virtual Status Prepare(std::shared_ptr<ExecutionPlan> execution_plan) = 0;
/// \brief Execute According to ExecutionPlan.
///
/// \return Status.
virtual Status Execute() = 0;
/// \brief Execute According to ExecutionPlan with user-specified inputs and outputs.
///
/// \param[in] inputs, input tensors for compute
/// \param[in] outputs, output tensors for compute
/// \param[in] before, callback invoked before each kernel (optional)
/// \param[in] after, callback invoked after each kernel (optional)
///
/// \return Status.
virtual Status Execute(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs,
KernelCallBack before = nullptr, KernelCallBack after = nullptr) = 0;
/// \brief Get list of inputs for the model.
///
/// \return vector of Tensor.
virtual std::vector<Tensor *> GetInputs() = 0;
/// \brief Get list of outputs for the model.
///
/// \return vector of Tensor.
virtual std::vector<Tensor *> GetOutputs() = 0;
};
} // namespace mindspore::infer::abstract
#endif  // MINDSPORE_LITE_INFER_GRAPH_RUNTIME_H_
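To show how the runtime composes with ExecutionPlan and Executor, a hedged partial sketch follows; names prefixed Sketch/My are assumptions and the remaining overrides are omitted for brevity.
// Hedged sketch: one executor per execution flow, run in plan order.
class SketchGraphRuntime : public infer::abstract::GraphRuntime {
 public:
  Status Prepare(std::shared_ptr<infer::abstract::ExecutionPlan> execution_plan) override {
    plan_ = execution_plan;
    for (auto &flow : plan_->GetExecutionFLows()) {    // spelling follows the interface above
      auto executor = std::make_shared<MyExecutor>();  // hypothetical Executor implementation
      auto ret = executor->Prepare(flow);
      if (ret != kSuccess) {
        return ret;
      }
      executors_.push_back(executor);
    }
    return kSuccess;
  }
  Status Execute() override {
    for (auto &executor : executors_) {
      auto ret = executor->Execute();
      if (ret != kSuccess) {
        return ret;
      }
    }
    return kSuccess;
  }
  // Execute(inputs, outputs, before, after), GetInputs() and GetOutputs() are omitted in this sketch.
 private:
  std::shared_ptr<infer::abstract::ExecutionPlan> plan_;
  std::vector<std::shared_ptr<infer::abstract::Executor>> executors_;
};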

View File

@ -0,0 +1,152 @@
/**
* Copyright 2023 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_INFER_KERNEL_H_
#define MINDSPORE_LITE_INFER_KERNEL_H_
#include <string>
#include <memory>
#include "infer/tensor.h"
#include "litert/kernel_exec.h"
namespace mindspore::infer::abstract {
using Kernel = mindspore::kernel::KernelExec;
// class Kernel : public std::enable_shared_from_this<Kernel> {
// public:
// virtual ~Kernel() = default;
// /// \brief Execute Kernel with inner inputs and outputs.
// ///
// /// \return int.
// virtual int Execute() = 0;
// /// \brief Prepare Kernel Execution.
// ///
// /// \return int.
// virtual int Prepare() = 0;
// /// \brief Resize Kernel Resource.
// ///
// /// \return int.
// virtual int ReSize() = 0;
// /// \brief Get Kernel name.
// ///
// /// \return name of kernel.
// virtual std::string name() const = 0;
// /// \brief Set Kernel name.
// ///
// /// \return void.
// virtual void set_name(const std::string &name) = 0;
// /// \brief Train Kernel.
// ///
// /// \return result of train.
// virtual int Train() = 0;
// /// \brief Is Kernel Train.
// ///
// /// \return is kernel trained.
// virtual bool IsTrain() const = 0;
// /// \brief Eval Kernel.
// ///
// /// \return int.
// virtual int Eval() = 0;
// /// \brief If the Kernel is Eval.
// ///
// /// \return bool.
// virtual bool IsEval() const = 0;
// /// \brief Set Kernel can be train.
// ///
// /// \param trainable is kernel can train
// ///
// /// \return void.
// virtual void SetTrainable(bool trainable = true) = 0;
// /// \brief Is Kernel can be train.
// ///
// /// \return bool.
// virtual bool IsTrainable() const = 0;
// /// \brief Set if kernel output is model output.
// ///
// /// \param is_model_output kernel output is model output
// ///
// /// \return void.
// virtual void set_is_model_output(bool is_model_output) = 0;
// /// \brief If kernel output is model output.
// ///
// /// \return bool.
// virtual bool is_model_output() const = 0;
// /// \brief If kernel finish infer shape.
// ///
// /// \return bool.
// virtual bool InferShapeDone() const = 0;
// /// \brief kernel op type.
// ///
// /// \return string of op type.
// virtual std::string type_str() = 0;
// /// \brief Set Input Tensors For Kernel.
// ///
// ///\param[in] in_tensors Abstract Input Tensor list for Kernel.
// ///
// /// \return void.
// virtual void set_in_tensors(const std::vector<Tensor *> &in_tensors) = 0;
// /// \brief Set Input Tensor For Kernel.
// ///
// ///\param[in] in_tensor Abstract Input Tensor for Kernel.
// ///\param[in] index Tensor Index for Kernel.
// ///
// /// \return void.
// virtual void set_in_tensor(Tensor *in_tensor, size_t index) = 0;
// /// \brief Set Output Tensors For Kernel.
// ///
// ///\param[in] out_tensors Abstract Output Tensor list for Kernel.
// ///
// /// \return void.
// virtual void set_out_tensors(const std::vector<Tensor *> &out_tensors) = 0;
// /// \brief Set Output Tensor For Kernel.
// ///
// ///\param[in] out_tensor Abstract Output Tensor for Kernel.
// ///\param[in] index Tensor Index for Kernel.
// ///
// /// \return void.
// virtual void set_out_tensor(Tensor *out_tensor, size_t index) = 0;
// /// \brief Get Input Tensor List Of Kernel.
// ///
// /// \return Tensor List.
// virtual const std::vector<Tensor *> &in_tensors() const = 0;
// /// \brief Get Output Tensor List Of Kernel.
// ///
// /// \return Tensor List.
// virtual const std::vector<Tensor *> &out_tensors() const = 0;
// };
} // namespace mindspore::infer::abstract
#endif // MINDSPORE_LITE_INFER_KERNEL_H_

View File

@ -0,0 +1,31 @@
/**
* Copyright 2023 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_INFER_CALLBACK_H_
#define MINDSPORE_LITE_INFER_CALLBACK_H_
#include <memory>
#include "executor/kernel_exec.h"
namespace mindspore::infer::abstract {
using KernelCallBack = mindspore::lite::KernelCallBack;
// class CallBack : public std::enable_shared_from_this<CallBack> {
// public:
// virtual ~CallBack() = default;
// };
} // namespace mindspore::infer::abstract
#endif // MINDSPORE_LITE_INFER_CALLBACK_H_

View File

@ -0,0 +1,189 @@
/**
* Copyright 2023 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_INFER_TENSOR_H_
#define MINDSPORE_LITE_INFER_TENSOR_H_
#include <vector>
#include <string>
#include <memory>
#include "core/type_id.h"
#include "src/tensor.h"
namespace mindspore::infer::abstract {
using Tensor = mindspore::lite::Tensor;
// struct LiteQuantParam {
// double scale;
// int32_t zeroPoint;
// float var_corr{1};
// float mean_corr{0};
// bool inited{false};
// std::vector<float> clusters{};
// int bitNum{8};
// int roundType{1};
// int multiplier{1};
// int dstDtype{32};
// // dynamic range
// double min{-255.0};
// double max{255.0};
// };
// enum CompressType {
// kNoCompression = 0,
// kIndexing = 1,
// kSparse = 2,
// kFSE = 3,
// kBitPacking = 4,
// kFSEInt = 5,
// kFSEInfer = 6
// };
// enum Category {
// CONST_TENSOR, // weight tensor
// CONST_SCALAR, // weight scalar
// VAR, // activation tensor
// GRAPH_INPUT,
// GRAPH_OUTPUT,
// };
// class mindspore::Allocator;
// using AllocatorPtr = std::shared_ptr<mindspore::Allocator>;
// class Tensor : public std::enable_shared_from_this<Tensor> {
// public:
// virtual ~Tensor() = default;
// virtual bool operator==(const Tensor &tensor) = 0;
// virtual void set_tensor_name(const std::string &name) = 0;
// virtual std::string tensor_name() const = 0;
// virtual TypeId data_type() const = 0;
// virtual void set_data_type(TypeId data_type) = 0;
// virtual std::vector<int> shape() const = 0;
// virtual void set_shape(const std::vector<int> &shape) = 0;
// virtual size_t Size() const = 0;
// virtual void set_allocator(AllocatorPtr allocator) = 0;
// virtual AllocatorPtr allocator() const = 0;
// virtual int MallocData(const AllocatorPtr allocator = nullptr) = 0;
// virtual void FreeData() = 0;
// virtual void *MutableData() = 0;
// virtual void *ReallocData() = 0;
// virtual void *data() = 0;
// virtual void *data() const = 0;
// // note: in the case of that old_data is valid, set_data just releases the ownership of it but not frees it. Of
// // course, you can call FreeData before calling set_data to ensure the data can be freed by current tensor.
// virtual void set_data(void *data, bool own_data = true) = 0;
// virtual void set_device_data(void *data) = 0;
// virtual void *device_data() const = 0;
// virtual Category category() const = 0;
// virtual void set_category(Category category) = 0;
// virtual void set_format(mindspore::Format format) = 0;
// virtual mindspore::Format format() const = 0;
// virtual int ref_count() const = 0;
// virtual int init_ref_count() const = 0;
// virtual void set_ref_count(int ref_count) = 0;
// virtual void set_init_ref_count(int ref_count) = 0;
// virtual void ResetRefCount() = 0;
// virtual void IncRefCount() = 0;
// virtual void DecRefCount() = 0;
// virtual std::string ToString() const = 0;
// virtual void AddQuantParam(const LiteQuantParam &quant_param) = 0;
// virtual void ClearQuantParam() = 0;
// virtual std::vector<LiteQuantParam> quant_params() const = 0;
// virtual void set_quant_params(std::vector<LiteQuantParam>) = 0;
// virtual std::vector<float> quant_clusters() const = 0;
// virtual void set_quant_clusters(const std::vector<float> &clusters) = 0;
// virtual bool IsConst() const = 0;
// virtual bool IsScalar() const = 0;
// virtual bool IsGraphInput() const = 0;
// virtual bool IsGraphOutput() const = 0;
// virtual void Prepare() = 0;
// virtual bool IsReady() const = 0;
// virtual bool own_data() const = 0;
// virtual void set_own_data(bool own_data) = 0;
// // template <typename T>
// // int Scale(float scale) {
// // T cast_scale = static_cast<T>(scale);
// // auto data = reinterpret_cast<T *>(data_);
// // if (data == nullptr) {
// // return RET_ERROR;
// // }
// // int length = ElementsNum();
// // for (int i = 0; i < length; i++) {
// // data[i] *= cast_scale;
// // }
// // scale_ *= scale;
// // return RET_OK;
// // }
// virtual float get_scale() const = 0;
// virtual void set_scale(float scale) = 0;
// virtual CompressType get_compress_type() const = 0;
// virtual void set_compress_type(CompressType compression_type) = 0;
// virtual void set_compressed_size(size_t compressed_size) = 0;
// virtual bool IsScale() const = 0;
// };
} // namespace mindspore::infer::abstract
#endif // MINDSPORE_LITE_INFER_TENSOR_H_

View File

@ -121,7 +121,7 @@ class MS_API BenchmarkFlags : public virtual FlagParser {
AddFlag(&BenchmarkFlags::model_type_, "modelType", "Input model type. MindIR | MindIR_Lite", "MindIR");
AddFlag(&BenchmarkFlags::in_data_file_, "inDataFile", "Input data file, if not set, use random input", "");
AddFlag(&BenchmarkFlags::config_file_, "configFile", "Config file", "");
AddFlag(&BenchmarkFlags::device_, "device", "CPU | GPU | NPU | Ascend310 | Ascend310P", "CPU");
AddFlag(&BenchmarkFlags::device_, "device", "CPU | GPU | NPU | Ascend310 | Ascend310P | Auto", "CPU");
AddFlag(&BenchmarkFlags::provider_, "provider", "device provider litert | tensorrt", "litert");
AddFlag(&BenchmarkFlags::cpu_bind_mode_, "cpuBindMode", "Input 0 for NO_BIND, 1 for HIGHER_CPU, 2 for MID_CPU.", 1);
// MarkPerformance
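For context, the new "Auto" device string is intended to correspond to AutoDeviceInfo (kAllDevice) on the API side; a minimal, hedged user-side configuration could look like the sketch below, while the benchmark itself currently expands "Auto" into the individual device infos, as the next hunk shows.
// Hedged sketch of user code (assumes include/api/context.h is included):
// defer device selection to the unified infer framework.
std::shared_ptr<mindspore::Context> MakeAutoContextSketch() {
  auto context = std::make_shared<mindspore::Context>();
  auto &device_list = context->MutableDeviceInfo();
  device_list.push_back(std::make_shared<mindspore::AutoDeviceInfo>());  // maps to kAllDevice
  return context;  // passed to Model::Build as usual
}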

View File

@ -447,8 +447,13 @@ int BenchmarkUnifiedApi::InitMSContext(const std::shared_ptr<mindspore::Context>
#endif
auto &device_list = context->MutableDeviceInfo();
// if (flags_->device_ == "Auto") {
// std::shared_ptr<AutoDeviceInfo> auto_device_info = std::make_shared<AutoDeviceInfo>();
// device_list.push_back(auto_device_info);
// device_list = auto_device_info->MutableDeviceInfo();
// }
if (flags_->device_ == "GPU") {
if (flags_->device_ == "GPU" || flags_->device_ == "Auto") {
std::shared_ptr<GPUDeviceInfo> gpu_device_info = std::make_shared<GPUDeviceInfo>();
gpu_device_info->SetEnableFP16(flags_->enable_fp16_);
uint32_t device_id = 0;
@ -477,14 +482,14 @@ int BenchmarkUnifiedApi::InitMSContext(const std::shared_ptr<mindspore::Context>
device_list.push_back(gpu_device_info);
}
if (flags_->device_ == "NPU") {
if (flags_->device_ == "NPU" || flags_->device_ == "Auto") {
std::shared_ptr<KirinNPUDeviceInfo> npu_device_info = std::make_shared<KirinNPUDeviceInfo>();
npu_device_info->SetEnableFP16(flags_->enable_fp16_);
npu_device_info->SetFrequency(kFrequencyDefault);
device_list.push_back(npu_device_info);
}
if (flags_->device_ == "Ascend310" || flags_->device_ == "Ascend310P") {
if (flags_->device_ == "Ascend310" || flags_->device_ == "Ascend310P" || flags_->device_ == "Auto") {
uint32_t device_id = 0;
auto device_id_env = std::getenv("ASCEND_DEVICE_ID");
if (device_id_env != nullptr) {

View File

@ -3,7 +3,6 @@
project_path=$1
build_path=$2
vendor_name=mslite
if [[ ! -d "$project_path" ]]; then
echo "[ERROR] No project path is provided"
exit 1