forked from mindspore-Ecosystem/mindspore
!34168 [MS][LITE] support CoreML delegate
Merge pull request !34168 from XianglongZeng/coreml_2
commit 2e32daa9e2
@@ -62,7 +62,8 @@
"mindspore/mindspore/lite/src/runtime/thread_pool.c" "readability/casting"
"mindspore/mindspore/lite/src/runtime/thread_pool.c" "runtime/arrays"
"mindspore/mindspore/lite/src/runtime/thread_pool.c" "runtime/int"
"mindspore/mindspore/lite/src/common/ops/ops_def.cc" "runtime/int"
"mindspore/mindspore/lite/src/runtime/delegate/coreml/coreml_executor.h" "readability/casting"
"mindspore/mindspore/lite/examples/runtime_gpu_extend/src/cl" "legal/copyright"
"mindspore/mindspore/lite/examples/runtime_gpu_extend/src/cl" "readability/casting"
"mindspore/mindspore/lite/examples/runtime_gpu_extend/src/cl" "readability/fn_size"
@@ -45,25 +45,42 @@ else()
endif()

if(BUILD_LITE)
    set(PROTOBUF_PATCH_ROOT ${TOP_DIR}/third_party/patch/protobuf)
else()
    set(PROTOBUF_PATCH_ROOT ${CMAKE_SOURCE_DIR}/third_party/patch/protobuf)
endif()

if(APPLE)
    mindspore_add_pkg(protobuf_arm
            VER 3.13.0
            LIBS protobuf
            URL ${REQ_URL}
            MD5 ${MD5}
            CMAKE_PATH cmake/
            CMAKE_OPTION
            -Dprotobuf_BUILD_TESTS=OFF
            -Dprotobuf_BUILD_SHARED_LIBS=OFF
            -DCMAKE_BUILD_TYPE=Release
            -Dprotobuf_WITH_ZLIB=OFF
            -DCMAKE_OSX_SYSROOT=${CMAKE_OSX_SYSROOT}
            -DCMAKE_OSX_DEPLOYMENT_TARGET=${CMAKE_OSX_DEPLOYMENT_TARGET}
            PATCHES ${PROTOBUF_PATCH_ROOT}/CVE-2021-22570.patch)
else()
    mindspore_add_pkg(protobuf_arm
            VER 3.13.0
            LIBS protobuf
            URL ${REQ_URL}
            MD5 ${MD5}
            CMAKE_PATH cmake/
            CMAKE_OPTION
            -Dprotobuf_BUILD_TESTS=OFF
            -Dprotobuf_BUILD_SHARED_LIBS=OFF
            -DCMAKE_BUILD_TYPE=Release
            -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
            -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
            -Dprotobuf_WITH_ZLIB=OFF
            PATCHES ${PROTOBUF_PATCH_ROOT}/CVE-2021-22570.patch)
endif()

include_directories(${protobuf_arm_INC})
add_library(mindspore::protobuf_arm ALIAS protobuf_arm::protobuf)
@@ -80,6 +80,9 @@ endif()
if(DEFINED ENV{MSLITE_ENABLE_NPU})
    set(MSLITE_ENABLE_NPU $ENV{MSLITE_ENABLE_NPU})
endif()
if(DEFINED ENV{MSLITE_ENABLE_COREML})
    set(MSLITE_ENABLE_COREML $ENV{MSLITE_ENABLE_COREML})
endif()
if(DEFINED ENV{MSLITE_ENABLE_TRAIN})
    set(MSLITE_ENABLE_TRAIN $ENV{MSLITE_ENABLE_TRAIN})
endif()
@@ -284,6 +287,10 @@ else()
    set(MSLITE_ENABLE_NPU off)
endif()

if(NOT APPLE)
    set(MSLITE_ENABLE_COREML off)
endif()

if(DEFINED ENV{MSLITE_ENABLE_RUNTIME_GLOG})
    set(MSLITE_ENABLE_RUNTIME_GLOG $ENV{MSLITE_ENABLE_RUNTIME_GLOG})
endif()
@@ -383,6 +390,7 @@ message(STATUS "************MindSpore Lite Build Option:************")
message(STATUS "\tMSLITE_GPU_BACKEND = \t${MSLITE_GPU_BACKEND}")
message(STATUS "\tMSLITE_REGISTRY_DEVICE = \t${MSLITE_REGISTRY_DEVICE}")
message(STATUS "\tMSLITE_ENABLE_NPU = \t${MSLITE_ENABLE_NPU}")
message(STATUS "\tMSLITE_ENABLE_COREML = \t${MSLITE_ENABLE_COREML}")
message(STATUS "\tMSLITE_ENABLE_TRAIN = \t${MSLITE_ENABLE_TRAIN}")
message(STATUS "\tMSLITE_MICRO_PLATFORM = \t${MSLITE_MICRO_PLATFORM}")
message(STATUS "\tMSLITE_ENABLE_SSE = \t${MSLITE_ENABLE_SSE}")
@@ -434,10 +442,10 @@ if(MSLITE_ENABLE_EXPERIMENTAL_KERNEL)
    add_compile_definitions(MSLITE_ENABLE_EXPERIMENTAL_KERNEL)
endif()

if(((MSLITE_GPU_BACKEND STREQUAL tensorrt) OR MSLITE_ENABLE_NPU OR MSLITE_ENABLE_COREML) AND (
        NOT MSLITE_ENABLE_DELEGATE))
    message(FATAL_ERROR "If MSLITE_ENABLE_DELEGATE is configured as off, MSLITE_ENABLE_NPU and MSLITE_ENABLE_COREML
        must also be configured as off, and MSLITE_GPU_BACKEND cannot be configured as tensorrt.")
endif()

if(MSLITE_ENABLE_HIGH_PERFORMANCE)
@@ -572,6 +580,27 @@ if(MSLITE_GPU_BACKEND STREQUAL opencl)
    set(MSLITE_DEPS_OPENCL on)
endif()

function(find_required_package pkg_name)
    find_package(${pkg_name})
    if(NOT ${pkg_name}_FOUND)
        message(FATAL_ERROR "Required package ${pkg_name} not found, "
                "please install the package and try building MindSpore again.")
    endif()
endfunction()

if(MSLITE_ENABLE_COREML)
    if(PLATFORM_ARM32)
        message(FATAL_ERROR "CoreML does not support the arm32 platform!")
    endif()
    add_compile_definitions(ENABLE_COREML)
    find_required_package(Patch)
    include(${TOP_DIR}/cmake/external_libs/protobuf.cmake)
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -arch arm64")
    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -arch arm64")
    set(CMAKE_OSX_DEPLOYMENT_TARGET "11.0")
    include(${TOP_DIR}/cmake/external_libs/protobuf_arm.cmake)
endif()

if(MSLITE_ENABLE_CONVERTER OR MSLITE_MINDDATA_IMPLEMENT STREQUAL "full" OR MSLITE_MINDDATA_IMPLEMENT STREQUAL "wrapper"
        OR MSLITE_ENABLE_TOOLS OR MSLITE_ENABLE_KERNEL_EXECUTOR)
    # include(${TOP_DIR}/cmake/external_libs/json.cmake)
@@ -649,14 +678,6 @@ if((MSLITE_ENABLE_CONVERTER OR MSLITE_ENABLE_RUNTIME_GLOG))
    set(MSLITE_DEPS_GLOG on)
endif()

if(MSLITE_ENABLE_CONVERTER OR MSLITE_ENABLE_KERNEL_EXECUTOR)
    find_required_package(Patch)
    # include(${TOP_DIR}/cmake/external_libs/protobuf.cmake)
@@ -429,6 +429,10 @@ build_lite() {
        mkdir -p ${BASEPATH}/output
        cp -r ${BASEPATH}/mindspore/lite/build/src/Release-*/mindspore-lite.framework ${BASEPATH}/output/mindspore-lite.framework
        cd ${BASEPATH}/output
        local protobuf_arm_lib=${BASEPATH}/mindspore/lite/build/_deps/protobuf_arm-src/_build/libprotobuf-lite.a
        if [ -e "$protobuf_arm_lib" ]; then
            cp "$protobuf_arm_lib" ${BASEPATH}/output/mindspore-lite.framework/
        fi
        tar -zcvf ${pkg_name}.tar.gz mindspore-lite.framework/
        sha256sum ${pkg_name}.tar.gz > ${pkg_name}.tar.gz.sha256
        rm -r mindspore-lite.framework
@@ -446,6 +446,10 @@ if(APPLE)
        ${MINDSPORE_LITE_PUB_HDRS_MINDAPI_HDRS}
        ${MINDSPORE_LITE_PUB_HDRS_IR_HDRS}
        )
    if(MSLITE_ENABLE_COREML)
        add_subdirectory(runtime/delegate/coreml)
        target_link_libraries(mindspore-lite_static coreml_proto_mid coreml_kernel_mid)
    endif()
    add_dependencies(mindspore-lite_static fbs_inner_src)
else()
    add_library(mindspore-lite_static STATIC $<TARGET_OBJECTS:lite_src_mid>)
@@ -0,0 +1,15 @@
file(GLOB PROTO_FILE ${TOP_DIR}/third_party/proto/coreml/*.proto)
ms_protobuf_generate(PROTO_SRCS PROTO_HDRS ${PROTO_FILE})
add_library(coreml_proto_mid OBJECT ${PROTO_SRCS})
include_directories(${CMAKE_BINARY_DIR}/proto)

file(GLOB_RECURSE COREML_RUNTIME_SRC
        ${CMAKE_CURRENT_SOURCE_DIR}/*.mm
        ${CMAKE_CURRENT_SOURCE_DIR}/*.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/op/*.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/pass/*.cc
        ${CMAKE_CURRENT_SOURCE_DIR}/../delegate_utils.cc
        )
add_library(coreml_kernel_mid OBJECT ${COREML_RUNTIME_SRC})
add_dependencies(coreml_kernel_mid fbs_src)
target_link_libraries(coreml_kernel_mid coreml_proto_mid)
@@ -0,0 +1,55 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_COREML_DELEGATE_H_
#define MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_COREML_DELEGATE_H_

#include <vector>
#include <map>
#include "include/api/delegate.h"
#include "include/context.h"
#include "src/runtime/delegate/coreml/op/coreml_op.h"
#include "src/runtime/delegate/coreml/pass/coreml_pass_manager.h"

namespace mindspore {
class CoreMLDelegate : public Delegate {
 public:
  CoreMLDelegate() = default;

  ~CoreMLDelegate() override;

  bool IsSupportCoreML() const;

  Status Init() override;

  Status Build(DelegateModel<schema::Primitive> *model) override;

 protected:
  CoreMLOp *GetOP(kernel::Kernel *kernel, const schema::Primitive *primitive);

  kernel::Kernel *CreateCoreMLGraph(const std::vector<CoreMLOp *> &ops, DelegateModel<schema::Primitive> *model,
                                    KernelIter from, KernelIter end);

  Status AddPasses();

 protected:
  int graph_index_ = 0;
  CoreMLPassManager *pass_manager_ = nullptr;
  std::map<schema::PrimitiveType, CoreMLGetOp> op_func_lists_;
};
}  // namespace mindspore

#endif  // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_COREML_DELEGATE_H_
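A minimal usage sketch of this interface (hypothetical driver code, not part of the diff; in practice the MindSpore Lite delegate framework drives these calls with a partitioned DelegateModel):

#include <memory>
#include "src/runtime/delegate/coreml/coreml_delegate.h"

// Hypothetical driver: initialize the delegate, then let it take over the
// supported parts of the model. Names outside the header above are illustrative.
mindspore::Status ApplyCoreMLDelegate(mindspore::DelegateModel<mindspore::schema::Primitive> *model) {
  auto delegate = std::make_shared<mindspore::CoreMLDelegate>();
  auto status = delegate->Init();  // returns kLiteNotSupport below iOS 11
  if (status != mindspore::kSuccess) {
    return status;
  }
  return delegate->Build(model);  // fuses supported kernel runs into CoreML subgraphs
}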
@@ -0,0 +1,233 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/runtime/delegate/coreml/coreml_delegate.h"
#include "include/errorcode.h"
#include "src/common/prim_util.h"
#include "src/runtime/delegate/coreml/op/coreml_op.h"
#include "src/runtime/delegate/coreml/op/activation_coreml.h"
#include "src/runtime/delegate/coreml/op/transpose_coreml.h"
#include "src/runtime/delegate/coreml/op/convolution_coreml.h"
#include "src/runtime/delegate/coreml/op/deconvolution_coreml.h"
#include "src/runtime/delegate/coreml/op/avg_pooling_coreml.h"
#include "src/runtime/delegate/coreml/op/max_pooling_coreml.h"
#include "src/runtime/delegate/coreml/op/arithmetic_coreml.h"
#include "src/runtime/delegate/coreml/op/resize_coreml.h"
#include "src/runtime/delegate/coreml/op/reshape_coreml.h"
#include "src/runtime/delegate/coreml/op/matmul_coreml.h"
#include "src/runtime/delegate/coreml/op/concat_coreml.h"
#include "src/runtime/delegate/coreml/op/unsqueeze_coreml.h"
#include "src/runtime/delegate/coreml/op/gather_coreml.h"
#include "src/runtime/delegate/coreml/op/shape_coreml.h"
#include "src/runtime/delegate/coreml/op/softmax_coreml.h"
#include "src/runtime/delegate/coreml/op/flatten_coreml.h"
#include "src/runtime/delegate/coreml/coreml_graph.h"
#include "src/runtime/delegate/delegate_utils.h"
#include "src/runtime/delegate/coreml/pass/coreml_format_trans_pass.h"
#include "src/runtime/delegate/coreml/pass/coreml_trans_extend_pass.h"
#include "src/runtime/delegate/coreml/pass/coreml_fusion_pass.h"

using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;

namespace mindspore {
CoreMLDelegate::~CoreMLDelegate() {
  if (pass_manager_ != nullptr) {
    pass_manager_->Clear();
    delete pass_manager_;
    pass_manager_ = nullptr;
  }
}

bool CoreMLDelegate::IsSupportCoreML() const {
  if (@available(iOS 11, *)) {
    return true;
  }
  return false;
}

Status CoreMLDelegate::AddPasses() {
  auto format_trans_pass = new (std::nothrow) CoreMLFormatTransPass();
  if (format_trans_pass == nullptr) {
    MS_LOG(ERROR) << "New CoreMLFormatTransPass failed.";
    return mindspore::kLiteNullptr;
  }
  pass_manager_->AddPass(format_trans_pass);

  auto trans_extend_pass = new (std::nothrow) CoreMLTransExtendPass();
  if (trans_extend_pass == nullptr) {
    MS_LOG(ERROR) << "New CoreMLTransExtendPass failed.";
    return mindspore::kLiteNullptr;
  }
  pass_manager_->AddPass(trans_extend_pass);

  auto fusion_pass = new (std::nothrow) CoreMLFusionPass();
  if (fusion_pass == nullptr) {
    MS_LOG(ERROR) << "New CoreMLFusionPass failed.";
    return mindspore::kLiteNullptr;
  }
  pass_manager_->AddPass(fusion_pass);
  return mindspore::kSuccess;
}

Status CoreMLDelegate::Init() {
  if (!IsSupportCoreML()) {
    return mindspore::kLiteNotSupport;
  }
  pass_manager_ = new (std::nothrow) CoreMLPassManager();
  if (pass_manager_ == nullptr) {
    MS_LOG(ERROR) << "New CoreML pass manager failed.";
    return mindspore::kLiteNullptr;
  }
  auto ret = AddPasses();
  if (ret != mindspore::kSuccess) {
    MS_LOG(ERROR) << "Add passes for CoreML pass manager failed.";
    return ret;
  }
  op_func_lists_.clear();
  op_func_lists_ = {
    {schema::PrimitiveType_Activation, GetCoreMLOp<ActivationCoreMLOp>},
    {schema::PrimitiveType_Transpose, GetCoreMLOp<TransposeCoreMLOp>},
    {schema::PrimitiveType_Conv2DFusion, GetCoreMLOp<ConvolutionCoreMLOp>},
    {schema::PrimitiveType_Conv2dTransposeFusion, GetCoreMLOp<DeconvolutionCoreMLOp>},
    {schema::PrimitiveType_AvgPoolFusion, GetCoreMLOp<AvgPoolingCoreMLOp>},
    {schema::PrimitiveType_MaxPoolFusion, GetCoreMLOp<MaxPoolingCoreMLOp>},
    {schema::PrimitiveType_AddFusion, GetCoreMLOp<ArithmeticCoreMLOp>},
    {schema::PrimitiveType_MulFusion, GetCoreMLOp<ArithmeticCoreMLOp>},
    {schema::PrimitiveType_Reshape, GetCoreMLOp<ReshapeCoreMLOp>},
    {schema::PrimitiveType_Resize, GetCoreMLOp<ResizeCoreMLOp>},
    {schema::PrimitiveType_Concat, GetCoreMLOp<ConcatCoreMLOp>},
    {schema::PrimitiveType_Shape, GetCoreMLOp<ShapeCoreMLOp>},
    {schema::PrimitiveType_Gather, GetCoreMLOp<GatherCoreMLOp>},
    {schema::PrimitiveType_Unsqueeze, GetCoreMLOp<UnsqueezeCoreMLOp>},
    {schema::PrimitiveType_MatMulFusion, GetCoreMLOp<MatMulCoreMLOp>},
    {schema::PrimitiveType_Softmax, GetCoreMLOp<SoftmaxCoreMLOp>},
    {schema::PrimitiveType_Flatten, GetCoreMLOp<FlattenCoreMLOp>},
  };
  return mindspore::kSuccess;
}

Status CoreMLDelegate::Build(DelegateModel<schema::Primitive> *model) {
  KernelIter from, end;
  std::vector<CoreMLOp *> coreml_ops;
  for (KernelIter iter = model->BeginKernelIterator(); iter != model->EndKernelIterator(); iter++) {
    kernel::Kernel *kernel = *iter;
    auto coreml_op = GetOP(kernel, model->GetPrimitive(kernel));
    if (coreml_op != nullptr) {
      // A non-null coreml_op means this kernel can be taken over by the delegate
      if (coreml_ops.empty()) {
        from = iter;
      }
      coreml_ops.push_back(coreml_op);
      end = iter;
    } else {
      if (!coreml_ops.empty()) {
        auto coreml_graph_kernel = CreateCoreMLGraph(coreml_ops, model, from, end);
        if (coreml_graph_kernel == nullptr) {
          MS_LOG(ERROR) << "Create CoreML Graph failed.";
          return mindspore::kLiteNullptr;
        }
        iter = model->Replace(from, end + 1, coreml_graph_kernel);
        coreml_ops.clear();
      }
    }
  }
  if (!coreml_ops.empty()) {
    auto coreml_graph_kernel = CreateCoreMLGraph(coreml_ops, model, from, end);
    if (coreml_graph_kernel == nullptr) {
      MS_LOG(ERROR) << "Create CoreML Graph failed.";
      return mindspore::kLiteNullptr;
    }
    model->Replace(from, end + 1, coreml_graph_kernel);
    coreml_ops.clear();
  }
  MS_LOG(INFO) << "CoreML graph build success!";
  return mindspore::kSuccess;
}

CoreMLOp *CoreMLDelegate::GetOP(kernel::Kernel *kernel, const schema::Primitive *primitive) {
  if (primitive == nullptr) {
    MS_LOG(ERROR) << "primitive is NULL!";
    return nullptr;
  }
  if (kernel == nullptr) {
    MS_LOG(ERROR) << "kernel is NULL!";
    return nullptr;
  }
  auto name = kernel->name();
  CoreMLOp *coreml_op = nullptr;
  auto node_type = primitive->value_type();
  if (op_func_lists_.find(node_type) != op_func_lists_.end()) {
    coreml_op = op_func_lists_[node_type](primitive, kernel->inputs(), kernel->outputs(), name);
  } else {
    MS_LOG(DEBUG) << "Unsupported op type for CoreML.";
    return nullptr;
  }

  for (size_t i = 0; i < kernel->inputs().size(); i++) {
    mindspore::MSTensor tensor = kernel->inputs()[i];
    if (tensor.DataType() == DataType::kNumberTypeFloat16 && tensor.Data() == nullptr) {
      tensor.SetDataType(DataType::kNumberTypeFloat32);
    }
  }
  for (size_t i = 0; i < kernel->outputs().size(); i++) {
    mindspore::MSTensor tensor = kernel->outputs()[i];
    if (tensor.DataType() == DataType::kNumberTypeFloat16) {
      tensor.SetDataType(DataType::kNumberTypeFloat32);
    }
  }

  if (coreml_op != nullptr) {
    MS_LOG(DEBUG) << "kernel: [" << kernel->name().c_str() << "] op success. "
                  << "op_type: " << lite::PrimitiveCurVersionTypeName(kernel->type());
  }
  return coreml_op;
}

kernel::Kernel *CoreMLDelegate::CreateCoreMLGraph(const std::vector<CoreMLOp *> &ops,
                                                  DelegateModel<schema::Primitive> *model, KernelIter from,
                                                  KernelIter end) {
  auto in_tensors = lite::GetGraphInTensors(ops, nullptr);
  auto out_tensors = lite::GraphOutTensors<CoreMLOp>(ops, model, from, end);
  auto graph_kernel = new (std::nothrow) CoreMLGraph(ops, in_tensors, out_tensors);
  if (graph_kernel == nullptr) {
    MS_LOG(ERROR) << "New CoreML Graph failed.";
    return nullptr;
  }
  graph_kernel->set_name("CoreMLGraph" + std::to_string(graph_index_++));

  // 1. For every op, find its predecessor and successor ops
  lite::FindPreNextOps<CoreMLOp>(ops);

  // 2. Run passes; they mainly resolve format inconsistencies by inserting transpose operators
  auto ret = pass_manager_->RunPass(graph_kernel);
  if (ret != RET_OK) {
    delete graph_kernel;
    MS_LOG(ERROR) << "CoreML Graph run pass failed.";
    return nullptr;
  }

  // 3. CoreMLGraph init: build and compile the MLModel
  ret = graph_kernel->Init();
  if (ret != RET_OK) {
    delete graph_kernel;
    MS_LOG(ERROR) << "CoreML subgraph Init failed.";
    return nullptr;
  }
  return graph_kernel;
}
}  // namespace mindspore
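The Build() loop above is a greedy run-partition: consecutive supported kernels are collected into [from, end] and fused into one subgraph kernel whenever an unsupported kernel (or the end of the model) breaks the run. A self-contained sketch of the same logic, with plain ints standing in for kernels (illustrative only, not part of the diff):

#include <functional>
#include <utility>
#include <vector>

// Returns the [first, last] index pairs of maximal runs where supported(k) holds,
// mirroring how Build() chooses the ranges it passes to CreateCoreMLGraph().
std::vector<std::pair<int, int>> PartitionRuns(const std::vector<int> &kernels,
                                               const std::function<bool(int)> &supported) {
  std::vector<std::pair<int, int>> runs;
  int from = -1;
  for (int i = 0; i < static_cast<int>(kernels.size()); ++i) {
    if (supported(kernels[i])) {
      if (from < 0) from = i;          // start a new run
    } else if (from >= 0) {
      runs.emplace_back(from, i - 1);  // an unsupported kernel closes the run
      from = -1;
    }
  }
  if (from >= 0) runs.emplace_back(from, static_cast<int>(kernels.size()) - 1);  // trailing run
  return runs;
}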
@@ -0,0 +1,50 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_COREML_EXECUTOR_H_
#define MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_COREML_EXECUTOR_H_

#import <CoreML/CoreML.h>
#import <Foundation/Foundation.h>
#include <string>
#include <vector>
#include "include/api/types.h"

API_AVAILABLE(ios(11))
@interface InputFeatureProvider : NSObject <MLFeatureProvider> {
  const std::vector<mindspore::MSTensor>* _inputs;
  NSSet* _featureNames;
}

- (instancetype)initWithInputs:(const std::vector<mindspore::MSTensor>*)inputs
                 coreMLVersion:(int)coreMLVersion;
- (NSSet<NSString*>*)featureNames;
- (MLFeatureValue *)featureValueForName:(NSString *)featureName;

@property(nonatomic, readonly) int coreMLVersion;
@end

API_AVAILABLE(ios(11))
@interface CoreMLExecutor : NSObject

- (bool)ExecuteWithInputs:(const std::vector<mindspore::MSTensor>&)inputs
                  outputs:(const std::vector<mindspore::MSTensor>&)outputs;

- (bool)loadModelC:(NSURL*)compileUrl;

@property MLModel* model;
@property(nonatomic, readonly) int coreMLVersion;
@end
#endif  // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_COREML_EXECUTOR_H_
@@ -0,0 +1,198 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#import "src/runtime/delegate/coreml/coreml_executor.h"
#include <cstring>
#include <fstream>
#include <iostream>

namespace {
// Subgraph splitting can change a tensor's name; this function recovers the original name.
// e.g. "x_duplicate_conv1" -> "conv1", and "conv1_duplicate" -> "conv1".
std::string GetOrgFeatureName(const std::string &input_name) {
  auto org_name = input_name;
  std::string pattern_1 = "_duplicate_";
  auto pos_1 = input_name.find(pattern_1);
  if (pos_1 != std::string::npos) {
    org_name = input_name.substr(pos_1 + pattern_1.length());
    return org_name;
  }
  std::string pattern_2 = "_duplicate";
  auto pos_2 = input_name.find(pattern_2);
  if (pos_2 != std::string::npos) {
    org_name = input_name.substr(0, pos_2);
    return org_name;
  }
  return org_name;
}
}  // namespace

@implementation InputFeatureProvider

- (instancetype)initWithInputs:(const std::vector<mindspore::MSTensor>*)inputs
                 coreMLVersion:(int)coreMLVersion {
  self = [super init];
  _inputs = inputs;
  _coreMLVersion = coreMLVersion;
  NSMutableArray* names = [[NSMutableArray alloc] init];
  for (auto& input : *_inputs) {
    auto input_name = GetOrgFeatureName(input.Name());
    [names addObject:[NSString stringWithCString:input_name.c_str()
                                        encoding:[NSString defaultCStringEncoding]]];
  }
  _featureNames = [NSSet setWithArray:names];
  return self;
}

- (NSSet<NSString*>*)featureNames { return _featureNames; }

- (MLFeatureValue*)featureValueForName:(NSString*)featureName {
  for (auto input : *_inputs) {
    auto input_name = GetOrgFeatureName(input.Name());
    if ([featureName cStringUsingEncoding:NSUTF8StringEncoding] == input_name) {
      NSArray* shape;
      NSArray* strides;
      int tensorRank = input.Shape().size();
      switch (tensorRank) {
        case 1:
          shape = @[ @(input.Shape()[0]) ];
          strides = @[ @1 ];
          break;
        case 2:
          shape = @[ @(input.Shape()[0]), @(input.Shape()[1]) ];
          strides = @[ @(input.Shape()[1]), @1 ];
          break;
        case 3:
          shape = @[ @(input.Shape()[0]), @(input.Shape()[1]), @(input.Shape()[2]) ];
          strides = @[ @(input.Shape()[2] * input.Shape()[1]), @(input.Shape()[2]), @1 ];
          break;
        case 4:
          shape = @[ @(input.Shape()[0]), @(input.Shape()[1]), @(input.Shape()[2]), @(input.Shape()[3]) ];
          strides = @[ @(input.Shape()[3] * input.Shape()[2] * input.Shape()[1]),
                       @(input.Shape()[3] * input.Shape()[2]), @(input.Shape()[3]), @1 ];
          break;
        default:
          NSLog(@"The rank of input tensor:%@ is unsupported!", featureName);
          // Without a valid shape/strides pair the MLMultiArray below cannot be built.
          return nil;
      }

      NSError* error = nil;
      MLMultiArray* mlArray = [[MLMultiArray alloc] initWithDataPointer:(float*)input.MutableData()
                                                                  shape:shape
                                                               dataType:MLMultiArrayDataTypeFloat32
                                                                strides:strides
                                                            deallocator:(^(void* bytes){
                                                            })error:&error];
      if (error != nil) {
        NSLog(@"Failed to create MLMultiArray for input tensor %@ error: %@!", featureName,
              [error localizedDescription]);
        return nil;
      }
      auto* mlFeatureValue = [MLFeatureValue featureValueWithMultiArray:mlArray];
      return mlFeatureValue;
    }
  }

  NSLog(@"Input tensor %@ not found!", featureName);
  return nil;
}
@end

@implementation CoreMLExecutor

- (bool)ExecuteWithInputs:(const std::vector<mindspore::MSTensor>&)inputs
                  outputs:(const std::vector<mindspore::MSTensor>&)outputs {
  if (_model == nil) {
    return NO;
  }
  _coreMLVersion = 3;
  NSError* error = nil;
  // Initialize the CoreML feature provider with the input MSTensors
  InputFeatureProvider* inputFeature =
      [[InputFeatureProvider alloc] initWithInputs:&inputs coreMLVersion:[self coreMLVersion]];
  if (inputFeature == nil) {
    NSLog(@"inputFeature initialization failed.");
    return NO;
  }
  // Inference configuration; the GPU is used automatically by default
  MLPredictionOptions* options = [[MLPredictionOptions alloc] init];

  // Run inference with the given inputs
  id<MLFeatureProvider> outputFeature = [_model predictionFromFeatures:inputFeature
                                                               options:options
                                                                 error:&error];
  if (error != nil) {
    NSLog(@"Execute model failed, error code: %@", [error localizedDescription]);
    return NO;
  }
  NSSet<NSString*>* outputFeatureNames = [outputFeature featureNames];
  for (auto output : outputs) {
    auto orgOutputName = GetOrgFeatureName(output.Name());
    NSString* outputName = [NSString stringWithCString:orgOutputName.c_str()
                                              encoding:[NSString defaultCStringEncoding]];
    MLFeatureValue* outputValue =
        [outputFeature featureValueForName:[outputFeatureNames member:outputName]];
    auto* data = [outputValue multiArrayValue];
    float* outputData = (float*)data.dataPointer;
    if (outputData == nullptr) {
      NSLog(@"Output data is null!");
      return NO;
    }
    memcpy(output.MutableData(), outputData, output.DataSize());
  }
  return YES;
}

- (bool)loadModelC:(NSURL*)compileUrl {
  NSError* error = nil;
  if (@available(iOS 12.0, *)) {
    // alloc alone returns an uninitialized object; init is required
    MLModelConfiguration* config = [[MLModelConfiguration alloc] init];
    config.computeUnits = MLComputeUnitsAll;
    _model = [MLModel modelWithContentsOfURL:compileUrl configuration:config error:&error];
  } else {
    _model = [MLModel modelWithContentsOfURL:compileUrl error:&error];
  }
  if (error != nil) {
    NSLog(@"Create MLModel failed, error code: %@", [error localizedDescription]);
    return NO;
  }
  return YES;
}
@end
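The rank switch above hand-writes row-major strides for ranks 1-4. The underlying rule is simply that stride[i] is the product of all dimensions after i; a generic version (a sketch, not what the diff uses) is:

#include <cstdint>
#include <vector>

// Row-major strides: stride[i] is the number of elements skipped when
// index i increases by one, i.e. the product of the trailing dimensions.
std::vector<int64_t> RowMajorStrides(const std::vector<int64_t> &shape) {
  std::vector<int64_t> strides(shape.size(), 1);
  for (int i = static_cast<int>(shape.size()) - 2; i >= 0; --i) {
    strides[i] = strides[i + 1] * shape[i + 1];
  }
  return strides;
}
// e.g. shape {2, 3, 4} -> strides {12, 4, 1}, matching the rank-3 case above.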
@@ -0,0 +1,43 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_COREML_EXECUTOR_WRAPPER_H_
#define MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_COREML_EXECUTOR_WRAPPER_H_
#include <vector>
#include <string>
#include "include/errorcode.h"
#include "include/api/types.h"

namespace mindspore {
class CoreMLExecutorWrapper {
 public:
  CoreMLExecutorWrapper();

  ~CoreMLExecutorWrapper();

  int Run(const std::vector<mindspore::MSTensor> &in_tensors, const std::vector<mindspore::MSTensor> &out_tensors);

  int CompileMLModel(const std::string &modelPath);

  int CleanTmpFile();

 private:
  void *coreml_executor_ = nullptr;
  std::string mlmodel_path_;
  std::string mlmodelc_path_;
};
}  // namespace mindspore
#endif  // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_COREML_EXECUTOR_WRAPPER_H_
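The wrapper's expected lifecycle, as driven by CoreMLGraph::Init() and Execute() later in this diff (illustrative only; model_path would come from SaveMLModel()):

#include "src/runtime/delegate/coreml/coreml_executor_wrapper.h"

// Hypothetical call sequence: compile the serialized .mlmodel once,
// then run it repeatedly with the graph's input/output tensors.
int CompileAndRunOnce(const std::string &model_path, const std::vector<mindspore::MSTensor> &ins,
                      const std::vector<mindspore::MSTensor> &outs) {
  mindspore::CoreMLExecutorWrapper wrapper;
  if (wrapper.CompileMLModel(model_path) != mindspore::lite::RET_OK) {  // also cleans up temp files
    return mindspore::lite::RET_ERROR;
  }
  return wrapper.Run(ins, outs);
}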
@@ -0,0 +1,93 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/runtime/delegate/coreml/coreml_executor_wrapper.h"
#import "src/runtime/delegate/coreml/coreml_executor.h"

namespace mindspore {
CoreMLExecutorWrapper::CoreMLExecutorWrapper() {
  if (coreml_executor_ == nullptr) {
    // Cast the Objective-C pointer to a C pointer and retain it, so ARC does not auto-release it
    coreml_executor_ = (__bridge_retained void*)[CoreMLExecutor new];
  }
}

CoreMLExecutorWrapper::~CoreMLExecutorWrapper() {
  // Cast the C pointer back to an Objective-C pointer and transfer ownership to ARC so it can be released
  auto arc_executor = (__bridge_transfer CoreMLExecutor*)coreml_executor_;
  (void)arc_executor;
  coreml_executor_ = nullptr;
}

int CoreMLExecutorWrapper::CompileMLModel(const std::string &modelPath) {
  mlmodel_path_ = modelPath;
  NSString *MLModelSrcPath = [NSString stringWithCString:modelPath.c_str() encoding:[NSString defaultCStringEncoding]];
  NSError *error = nil;
  // Pass &error (not nil) so compile failures are actually reported to the check below
  NSURL *MLModelCURL = [MLModel compileModelAtURL:[NSURL fileURLWithPath:MLModelSrcPath] error:&error];
  if (error) {
    NSLog(@"Compile MLModel to MLModelC Error: %@", error);
    (void)CleanTmpFile();
    return lite::RET_ERROR;
  }
  mlmodelc_path_ = [[MLModelCURL path] UTF8String];
  bool success = [(__bridge id)coreml_executor_ loadModelC:MLModelCURL];
  if (!success) {
    NSLog(@"Load MLModelC failed!");
    (void)CleanTmpFile();
    return lite::RET_ERROR;
  }
  auto ret = CleanTmpFile();
  if (ret != lite::RET_OK) {
    NSLog(@"Clean temp model file failed!");
  }
  return lite::RET_OK;
}

int CoreMLExecutorWrapper::Run(const std::vector<mindspore::MSTensor> &in_tensors,
                               const std::vector<mindspore::MSTensor> &out_tensors) {
  auto success = [(__bridge id)coreml_executor_ ExecuteWithInputs:in_tensors outputs:out_tensors];
  if (!success) {
    NSLog(@"CoreML model execution failed!");
    return lite::RET_ERROR;
  }
  NSLog(@"CoreML model execution success!");
  return lite::RET_OK;
}

int CoreMLExecutorWrapper::CleanTmpFile() {
  NSError* error = nil;
  NSString *mlModelPath = [NSString stringWithCString:mlmodel_path_.c_str() encoding:[NSString defaultCStringEncoding]];
  NSString *mlModelCPath = [NSString stringWithCString:mlmodelc_path_.c_str() encoding:[NSString defaultCStringEncoding]];
  NSFileManager *fileManager = [NSFileManager defaultManager];
  BOOL isDir = NO;
  if ([fileManager fileExistsAtPath:mlModelPath isDirectory:&isDir] && isDir) {
    [fileManager removeItemAtPath:mlModelPath error:&error];
    if (error != nil) {
      NSLog(@"Failed cleaning up model: %@", [error localizedDescription]);
      return lite::RET_ERROR;
    }
  }
  isDir = NO;
  if ([fileManager fileExistsAtPath:mlModelCPath isDirectory:&isDir] && isDir) {
    [fileManager removeItemAtPath:mlModelCPath error:&error];
    if (error != nil) {
      NSLog(@"Failed cleaning up compiled model: %@", [error localizedDescription]);
      return lite::RET_ERROR;
    }
  }
  return lite::RET_OK;
}
}  // namespace mindspore
@@ -0,0 +1,171 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/runtime/delegate/coreml/coreml_graph.h"
#include <fstream>
namespace mindspore {
CoreMLGraph::~CoreMLGraph() {
  for (auto *kernel : all_kernels_) {
    delete kernel;
  }
  for (auto *op : coreml_ops_) {
    delete op;
  }
  for (auto tensor : insert_tensors_) {
    MSTensor::DestroyTensorPtr(tensor);
  }
  delete ml_model_;
  delete executor_wrapper_;
}

void CoreMLGraph::set_input(mindspore::MSTensor in_tensor, int index) {
  MS_ASSERT(static_cast<size_t>(index) < inputs_.size());
  auto origin_tensor = this->inputs_[index];
  for (auto kernel : all_kernels_) {
    for (size_t i = 0; i < kernel->inputs().size(); i++) {
      if (kernel->inputs()[i] == origin_tensor) {
        kernel->set_input(in_tensor, i);
      }
    }
  }
  this->inputs_[index] = in_tensor;
}

void CoreMLGraph::set_output(mindspore::MSTensor out_tensor, int index) {
  MS_ASSERT(static_cast<size_t>(index) < outputs_.size());
  auto origin_tensor = this->outputs_[index];
  for (auto kernel : all_kernels_) {
    for (size_t i = 0; i < kernel->outputs().size(); i++) {
      if (kernel->outputs()[i] == origin_tensor) {
        kernel->set_output(out_tensor, i);
      }
    }
  }
  this->outputs_[index] = out_tensor;
}

int CoreMLGraph::Init() {
  ml_model_ = BuildMLModel();
  if (ml_model_ == nullptr) {
    MS_LOG(ERROR) << "Build CoreML model failed.";
    return RET_ERROR;
  }
  auto model_path = SaveMLModel();
  executor_wrapper_ = new (std::nothrow) CoreMLExecutorWrapper();
  if (executor_wrapper_ == nullptr) {
    MS_LOG(ERROR) << "Create CoreML executor wrapper failed.";
    return RET_ERROR;
  }
  auto ret = executor_wrapper_->CompileMLModel(model_path);
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Compile CoreML model failed!";
    return RET_ERROR;
  }
  return RET_OK;
}

CoreML::Specification::Model *CoreMLGraph::BuildMLModel() {
  auto *model = new (std::nothrow) CoreML::Specification::Model();
  if (model == nullptr) {
    // Guard the std::nothrow allocation before the first dereference below
    MS_LOG(ERROR) << "New CoreML model failed.";
    return nullptr;
  }
  model->set_specificationversion(kCoreMLVersion4);
  model->mutable_neuralnetwork()->set_arrayinputshapemapping(CoreML::Specification::EXACT_ARRAY_MAPPING);
  auto *network = model->mutable_neuralnetwork();
  for (auto &op : coreml_ops_) {
    auto ret = op->BuildLayer();
    if (ret != RET_OK) {
      MS_LOG(ERROR) << "Failed to build layer for op: " << op->name();
      delete model;
      model = nullptr;
      return nullptr;
    }
    op->SetMLOpInOut();
    auto layers = op->GetLayers();
    if (layers.empty()) {
      MS_LOG(ERROR) << "No layer found for op: " << op->name();
      delete model;
      model = nullptr;
      return nullptr;
    }
    for (auto layer : layers) {
      MS_ASSERT(layer != nullptr);
      network->mutable_layers()->AddAllocated(layer);
    }
  }
  auto ret = SetMLModelInOut(model);
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Set model input output failed.";
    delete model;
    model = nullptr;
    return nullptr;
  }
  return model;
}

int CoreMLGraph::SetMLModelInOut(CoreML::Specification::Model *model) {
  MS_ASSERT(model != nullptr);
  auto model_desc = model->mutable_description();
  for (const auto &in_tensor : this->inputs_) {
    // add input
    auto input = model_desc->add_input();
    input->set_name(in_tensor.Name());
    auto in_multi_array = input->mutable_type()->mutable_multiarraytype();
    if (in_tensor.DataType() == DataType::kNumberTypeFloat32) {
      in_multi_array->set_datatype(CoreML::Specification::ArrayFeatureType::FLOAT32);
    } else if (in_tensor.DataType() == DataType::kNumberTypeInt32) {
      in_multi_array->set_datatype(CoreML::Specification::ArrayFeatureType::INT32);
    } else {
      MS_LOG(ERROR) << "Unsupported model input data type: " << static_cast<int>(in_tensor.DataType());
      return RET_ERROR;
    }
    for (int64_t i : in_tensor.Shape()) {
      in_multi_array->add_shape(static_cast<uint64_t>(i));
    }
  }
  for (const auto &out_tensor : this->outputs_) {
    // add output
    auto output = model_desc->add_output();
    output->set_name(out_tensor.Name());
    auto out_multi_array = output->mutable_type()->mutable_multiarraytype();
    if (out_tensor.DataType() == DataType::kNumberTypeFloat32) {
      out_multi_array->set_datatype(CoreML::Specification::ArrayFeatureType::FLOAT32);
    } else if (out_tensor.DataType() == DataType::kNumberTypeInt32) {
      out_multi_array->set_datatype(CoreML::Specification::ArrayFeatureType::INT32);
    } else {
      MS_LOG(ERROR) << "Unsupported model output data type: " << static_cast<int>(out_tensor.DataType());
      return RET_ERROR;
    }
    for (int64_t i : out_tensor.Shape()) {
      out_multi_array->add_shape(static_cast<uint64_t>(i));
    }
  }
  return RET_OK;
}

std::string CoreMLGraph::SaveMLModel() {
  MS_ASSERT(ml_model_ != nullptr);
  std::string model_name = this->name() + ".mlmodel";
  auto model_path = std::string(getenv("HOME")) + "/tmp/" + model_name;
  std::ofstream file_stream(model_path, std::ios::out | std::ios::binary);
  ml_model_->SerializeToOstream(&file_stream);
  MS_LOG(INFO) << "Build CoreML model success!";
  return model_path;
}

int CoreMLGraph::Execute() {
  auto ret = executor_wrapper_->Run(inputs(), outputs());
  if (ret == RET_OK) {
    MS_LOG(INFO) << "Run CoreML model success.";
  }
  return ret;
}
}  // namespace mindspore
@@ -0,0 +1,75 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_COREML_GRAPH_H_
#define MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_COREML_GRAPH_H_

#include <vector>
#include <queue>
#include <map>
#include <string>
#include <utility>
#include "proto/Model.pb.h"
#include "proto/NeuralNetwork.pb.h"
#include "include/api/kernel.h"
#include "src/runtime/delegate/coreml/op/coreml_op.h"
#include "src/runtime/delegate/coreml/coreml_executor_wrapper.h"

namespace mindspore {
constexpr int kCoreMLVersion4 = 4;
class CoreMLGraph : public kernel::Kernel {
 public:
  CoreMLGraph(std::vector<CoreMLOp *> coreml_ops, const std::vector<mindspore::MSTensor> &inputs,
              const std::vector<mindspore::MSTensor> &outputs)
      : kernel::Kernel(inputs, outputs, nullptr, nullptr), coreml_ops_(std::move(coreml_ops)) {}

  ~CoreMLGraph() override;

  int Init();

  int Prepare() override { return lite::RET_OK; }

  int Execute() override;

  int ReSize() override {
    MS_LOG(ERROR) << "CoreML does not currently support resizing.";
    return lite::RET_ERROR;
  }

  void set_input(mindspore::MSTensor in_tensor, int index) override;

  void set_output(mindspore::MSTensor out_tensor, int index) override;

  std::vector<CoreMLOp *> *GetOps() { return &coreml_ops_; }

  std::vector<mindspore::MSTensor *> *GetInsertTensors() { return &insert_tensors_; }

 protected:
  CoreML::Specification::Model *BuildMLModel();

  int SetMLModelInOut(CoreML::Specification::Model *model);

  std::string SaveMLModel();

  std::vector<CoreMLOp *> coreml_ops_{};
  std::vector<kernel::Kernel *> all_kernels_{};
  CoreML::Specification::Model *ml_model_ = nullptr;
  CoreMLExecutorWrapper *executor_wrapper_ = nullptr;
  std::vector<mindspore::MSTensor *> insert_tensors_;
};
}  // namespace mindspore

#endif  // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_COREML_GRAPH_H_
@@ -0,0 +1,60 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/runtime/delegate/coreml/op/activation_coreml.h"
namespace mindspore {
int ActivationCoreMLOp::IsSupport() {
  auto act_prim = op_primitive_->value_as_Activation();
  if (act_prim == nullptr) {
    MS_LOG(ERROR) << "Get null primitive value for op: " << name_;
    return RET_ERROR;
  }
  act_type_ = act_prim->activation_type();
  if (act_type_ != schema::ActivationType_RELU && act_type_ != schema::ActivationType_RELU6 &&
      act_type_ != schema::ActivationType_SIGMOID && act_type_ != schema::ActivationType_TANH &&
      act_type_ != schema::ActivationType_HSIGMOID && act_type_ != schema::ActivationType_LEAKY_RELU &&
      act_type_ != schema::ActivationType_SWISH && act_type_ != schema::ActivationType_ELU) {
    MS_LOG(WARNING) << "Unsupported activation type for activation op " << name_ << " when running CoreML.";
    return RET_NOT_SUPPORT;
  }
  return RET_OK;
}

int ActivationCoreMLOp::BuildLayer() {
  MS_ASSERT(op_ != nullptr);
  switch (act_type_) {
    case schema::ActivationType_RELU:
      op_->mutable_activation()->mutable_relu();
      break;
    case schema::ActivationType_RELU6: {
      auto clip_param = op_->mutable_clip();
      clip_param->set_minval(0);
      clip_param->set_maxval(kValueThreshold6);
      break;
    }
    case schema::ActivationType_TANH:
      op_->mutable_activation()->mutable_tanh();
      break;
    case schema::ActivationType_SIGMOID:
      op_->mutable_activation()->mutable_sigmoid();
      break;
    default:
      // Note: IsSupport also admits HSIGMOID, LEAKY_RELU, SWISH and ELU, which have no branch here yet
      MS_LOG(ERROR) << "Unsupported activation type.";
      return RET_ERROR;
  }
  return RET_OK;
}
}  // namespace mindspore
@@ -0,0 +1,37 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_ACTIVATION_COREML_H_
#define MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_ACTIVATION_COREML_H_

#include <vector>
#include <string>
#include "src/runtime/delegate/coreml/op/coreml_op.h"
namespace mindspore {
class ActivationCoreMLOp : public CoreMLOp {
 public:
  ActivationCoreMLOp(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
                     const std::vector<mindspore::MSTensor> &out_tensors, std::string name)
      : CoreMLOp(primitive, in_tensors, out_tensors, name) {}

  int IsSupport() override;

  int BuildLayer() override;

 private:
  schema::ActivationType act_type_ = schema::ActivationType_NO_ACTIVATION;
};
}  // namespace mindspore
#endif  // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_ACTIVATION_COREML_H_
@@ -0,0 +1,104 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/runtime/delegate/coreml/op/arithmetic_coreml.h"
namespace mindspore {
int ArithmeticCoreMLOp::IsSupport() {
  MS_CHECK_TRUE_MSG(in_tensors_.size() == kInputSize1, RET_NOT_SUPPORT, "Arithmetic op only support two inputs.");
  auto input_a = in_tensors_.at(0);
  auto input_b = in_tensors_.at(1);
  if ((input_a.IsConst() && input_a.ElementNum() == 1) || (input_b.IsConst() && input_b.ElementNum() == 1)) {
    use_normal_ = true;
  }
  return RET_OK;
}

int ArithmeticCoreMLOp::BuildLayer() {
  if (use_normal_) {
    auto ret = BuildNormalArithmetic();
    if (ret != RET_OK) {
      MS_LOG(ERROR) << "Build normal arithmetic layer failed for op: " << name_;
      return RET_ERROR;
    }
    return RET_OK;
  }
  auto ret = BuildBroadcastableArithmetic();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Build broadcastable arithmetic layer failed for op: " << name_;
    return RET_ERROR;
  }
  return RET_OK;
}

int ArithmeticCoreMLOp::BuildNormalArithmetic() {
  MS_ASSERT(op_ != nullptr);
  switch (type_) {
    case schema::PrimitiveType_AddFusion: {
      auto add_param = op_->mutable_add();
      SetNormalConst<CoreML::Specification::AddLayerParams>(add_param);
      break;
    }
    case schema::PrimitiveType_MulFusion: {
      auto mul_param = op_->mutable_multiply();
      SetNormalConst<CoreML::Specification::MultiplyLayerParams>(mul_param);
      break;
    }
    default:
      MS_LOG(ERROR) << "Unsupported arithmetic type.";
      return RET_ERROR;
  }
  return RET_OK;
}

int ArithmeticCoreMLOp::BuildBroadcastableArithmetic() {
  MS_ASSERT(op_ != nullptr);
  switch (type_) {
    case schema::PrimitiveType_AddFusion:
      (void)op_->mutable_addbroadcastable();
      break;
    case schema::PrimitiveType_MulFusion:
      (void)op_->mutable_multiplybroadcastable();
      break;
    default:
      MS_LOG(ERROR) << "Unsupported arithmetic type.";
      return RET_ERROR;
  }
  for (const auto &in_tensor : in_tensors_) {
    if (in_tensor.IsConst()) {
      auto ret = SetConstInput(in_tensor);
      if (ret != RET_OK) {
        MS_LOG(ERROR) << "Set const input failed for op: " << name_;
        return RET_ERROR;
      }
    }
  }
  return RET_OK;
}

void ArithmeticCoreMLOp::SetMLOpInOut() {
  MS_ASSERT(op_ != nullptr);
  for (const auto &in_tensor : in_tensors_) {
    if (in_tensor.IsConst() && !use_normal_) {
      // A const op has no inputs; it only produces its named output
      const_ops_[in_tensor.Name()]->add_output(in_tensor.Name());
    }
    if (!(in_tensor.IsConst() && use_normal_)) {
      op_->add_input(in_tensor.Name());
    }
  }
  op_->add_output(out_tensors_[0].Name());
}
}  // namespace mindspore
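The two build paths above hinge on one question: is either operand a compile-time scalar? If so, the cheaper Add/MultiplyLayerParams form with the scalar folded into alpha is used; otherwise the broadcastable form is used, with const tensors fed in through explicit const layers. Schematically (a sketch mirroring IsSupport(), not code from the diff):

#include <cstdint>

// A const single-element operand selects the "normal" layer (scalar folded
// into alpha); anything else falls back to the broadcastable layer.
bool UseNormalArithmetic(bool a_const, int64_t a_elems, bool b_const, int64_t b_elems) {
  return (a_const && a_elems == 1) || (b_const && b_elems == 1);
}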
@@ -0,0 +1,58 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_ARITHMETIC_COREML_H_
#define MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_ARITHMETIC_COREML_H_

#include <vector>
#include <string>
#include "src/runtime/delegate/coreml/op/coreml_op.h"
namespace mindspore {
class ArithmeticCoreMLOp : public CoreMLOp {
 public:
  ArithmeticCoreMLOp(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
                     const std::vector<mindspore::MSTensor> &out_tensors, std::string name)
      : CoreMLOp(primitive, in_tensors, out_tensors, name) {}

  int IsSupport() override;

  int BuildLayer() override;

  int BuildNormalArithmetic();

  int BuildBroadcastableArithmetic();

 protected:
  void SetMLOpInOut() override;

  template <typename T>
  void SetNormalConst(T *arithmetic_param) {
    const void *org_data = nullptr;
    if (in_tensors_[0].IsConst()) {
      org_data = in_tensors_[0].Data().get();
    } else if (in_tensors_[1].IsConst()) {
      org_data = in_tensors_[1].Data().get();
    }
    if (org_data != nullptr) {
      auto const_data = reinterpret_cast<const float *>(org_data);
      arithmetic_param->set_alpha(const_data[0]);
    }
  }

 protected:
  bool use_normal_ = false;
};
}  // namespace mindspore
#endif  // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_ARITHMETIC_COREML_H_
@@ -0,0 +1,72 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/runtime/delegate/coreml/op/avg_pooling_coreml.h"
namespace mindspore {
int AvgPoolingCoreMLOp::InitParams() {
  pooling_prim_ = op_primitive_->value_as_AvgPoolFusion();
  if (pooling_prim_ == nullptr) {
    MS_LOG(ERROR) << "Get null primitive value for op: " << name_;
    return RET_ERROR;
  }
  return RET_OK;
}

int AvgPoolingCoreMLOp::BuildLayer() {
  MS_ASSERT(op_ != nullptr);
  auto pooling_param = op_->mutable_pooling();
  pooling_param->set_type(CoreML::Specification::PoolingLayerParams::AVERAGE);
  if (pooling_prim_->global()) {
    pooling_param->set_globalpooling(true);
    pooling_param->mutable_valid();
    return RET_OK;
  }
  pooling_param->set_avgpoolexcludepadding(true);
  auto kernel_h = static_cast<int>(*(pooling_prim_->kernel_size()->begin()));
  auto kernel_w = static_cast<int>(*(pooling_prim_->kernel_size()->begin() + 1));
  auto stride_h = static_cast<int>(*(pooling_prim_->strides()->begin()));
  auto stride_w = static_cast<int>(*(pooling_prim_->strides()->begin() + 1));
  pooling_param->add_stride(stride_h);
  pooling_param->add_stride(stride_w);
  pooling_param->add_kernelsize(kernel_h);
  pooling_param->add_kernelsize(kernel_w);
  if (pooling_prim_->pad_mode() == schema::PadMode_SAME) {
    pooling_param->mutable_same();
  } else {
    pooling_param->mutable_valid();
    if (pooling_prim_->pad() != nullptr) {
      auto pad_u = static_cast<int>(*(pooling_prim_->pad()->begin() + PAD_UP));
      auto pad_d = static_cast<int>(*(pooling_prim_->pad()->begin() + PAD_DOWN));
      auto pad_l = static_cast<int>(*(pooling_prim_->pad()->begin() + PAD_LEFT));
      auto pad_r = static_cast<int>(*(pooling_prim_->pad()->begin() + PAD_RIGHT));
      auto ret = SetPadding({pad_u, pad_d, pad_l, pad_r});
      if (ret != RET_OK) {
        MS_LOG(ERROR) << "Fail to set padding for op: " << name_;
        return RET_ERROR;
      }
    }
  }
  auto act_type = pooling_prim_->activation_type();
  if (act_type != schema::ActivationType_NO_ACTIVATION) {
    auto ret = SetActivation(act_type);
    if (ret != RET_OK) {
      MS_LOG(ERROR) << "Set pooling activation failed for op: " << name_;
      return RET_ERROR;
    }
  }
  return RET_OK;
}
}  // namespace mindspore
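The attribute layout assumed above follows the MindSpore Lite flatbuffer schema: kernel_size and strides are laid out as {h, w}, and pad as {up, down, left, right}. For example, a 3x3 average pool with stride 2 would populate the CoreML params as (illustrative values):

// pooling_param->add_stride(2);      // stride_h
// pooling_param->add_stride(2);      // stride_w
// pooling_param->add_kernelsize(3);  // kernel_h
// pooling_param->add_kernelsize(3);  // kernel_w
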
@@ -0,0 +1,39 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_AVG_POOLING_COREML_H_
#define MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_AVG_POOLING_COREML_H_

#include <vector>
#include <string>
#include <utility>
#include <unordered_map>
#include "src/runtime/delegate/coreml/op/coreml_op.h"
namespace mindspore {
class AvgPoolingCoreMLOp : public CoreMLOp {
 public:
  AvgPoolingCoreMLOp(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
                     const std::vector<mindspore::MSTensor> &out_tensors, std::string name)
      : CoreMLOp(primitive, in_tensors, out_tensors, name) {}

  int InitParams() override;

  int BuildLayer() override;

 private:
  const schema::AvgPoolFusion *pooling_prim_ = nullptr;
};
}  // namespace mindspore
#endif  // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_AVG_POOLING_COREML_H_
@@ -0,0 +1,72 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/runtime/delegate/coreml/op/concat_coreml.h"
namespace mindspore {
int ConcatCoreMLOp::IsSupport() {
  MS_CHECK_GE(in_tensors_.size(), kInputSize1, RET_NOT_SUPPORT);
  if (std::any_of(in_tensors_.begin(), in_tensors_.end(), [](mindspore::MSTensor &tensor) {
        return tensor.IsConst() && tensor.DataType() != DataType::kNumberTypeInt32 &&
               tensor.DataType() != DataType::kNumberTypeFloat32;
      })) {
    MS_LOG(ERROR) << "The datatype of CoreML Concat op's constant inputs must be int or float, op name: " << name_;
    return RET_NOT_SUPPORT;
  }
  return RET_OK;
}

int ConcatCoreMLOp::InitParams() {
  concat_prim_ = op_primitive_->value_as_Concat();
  if (concat_prim_ == nullptr) {
    MS_LOG(ERROR) << "Get null primitive value for op: " << name_;
    return RET_ERROR;
  }
  axis_ = concat_prim_->axis();
  return RET_OK;
}

int ConcatCoreMLOp::BuildLayer() {
  MS_ASSERT(op_ != nullptr);
  op_->mutable_concatnd()->set_axis(axis_);
  for (const auto &in_tensor : in_tensors_) {
    if (in_tensor.IsConst()) {
      auto ret = SetConstInput(in_tensor);
      if (ret != RET_OK) {
        MS_LOG(ERROR) << "Set const input failed for op: " << name_;
        return RET_ERROR;
      }
    }
  }
  return RET_OK;
}

int ConcatCoreMLOp::HandleAxis() {
  axis_ = NCHW2NHWC_PERM[axis_];
  return RET_OK;
}

void ConcatCoreMLOp::SetMLOpInOut() {
  MS_ASSERT(op_ != nullptr);
  for (const auto &in_tensor : in_tensors_) {
    if (in_tensor.IsConst()) {
      // a const op has no input of its own; it only produces the named output
      const_ops_[in_tensor.Name()]->add_output(in_tensor.Name());
    }
    op_->add_input(in_tensor.Name());
  }
  op_->add_output(out_tensors_[0].Name());
}
}  // namespace mindspore
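HandleAxis remaps the concat axis once the surrounding graph has been converted to CoreML's NCHW layout. With NCHW2NHWC_PERM = {0, 2, 3, 1}, an axis given in NHWC coordinates is looked up to find its NCHW position; a worked example:

// axis_ == 3 (the channel axis in NHWC)
// axis_ = NCHW2NHWC_PERM[3];  // == 1, the channel axis in NCHW
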
@@ -0,0 +1,44 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_CONCAT_COREML_H_
#define MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_CONCAT_COREML_H_

#include <vector>
#include <string>
#include "src/runtime/delegate/coreml/op/coreml_op.h"
namespace mindspore {
class ConcatCoreMLOp : public CoreMLOp {
 public:
  ConcatCoreMLOp(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
                 const std::vector<mindspore::MSTensor> &out_tensors, std::string name)
      : CoreMLOp(primitive, in_tensors, out_tensors, name) {}

  int IsSupport() override;

  int InitParams() override;

  int HandleAxis() override;

  int BuildLayer() override;

  void SetMLOpInOut() override;

 private:
  int axis_;
  const schema::Concat *concat_prim_ = nullptr;
};
}  // namespace mindspore
#endif  // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_CONCAT_COREML_H_
@@ -0,0 +1,89 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/runtime/delegate/coreml/op/convolution_base_coreml.h"
#include "src/runtime/delegate/delegate_utils.h"
namespace mindspore {
int ConvolutionBaseCoreMLOp::SetConvWeight() {
  auto weight_tensor = in_tensors_.at(kWeightIndex);
  auto weight_shape = weight_tensor.Shape();
  conv_param_->set_kernelchannels(weight_shape.at(MS_WT_CIN));
  conv_param_->set_outputchannels(weight_shape.at(MS_WT_COUT));
  conv_param_->add_kernelsize(weight_shape.at(MS_WT_H));
  conv_param_->add_kernelsize(weight_shape.at(MS_WT_W));

  // transpose the weight, (c_out, h, w, c_in) -> (c_out, c_in, h, w)
  auto org_weight = weight_tensor.Data().get();
  MS_ASSERT(org_weight != nullptr);
  if (weight_tensor.DataType() == DataType::kNumberTypeFloat32) {
    auto *ml_weight_container = conv_param_->mutable_weights()->mutable_floatvalue();
    ml_weight_container->Resize(weight_tensor.ElementNum(), 0);
    auto *ml_weight = reinterpret_cast<void *>(ml_weight_container->mutable_data());
    lite::PackNHWCToNCHWFp32(org_weight, ml_weight, weight_shape[MS_WT_COUT],
                             weight_shape[MS_WT_H] * weight_shape[MS_WT_W], weight_shape[MS_WT_CIN]);
  } else {
    MS_LOG(ERROR) << "Unsupported data type of weight tensor for CoreML convolution.";
    return RET_ERROR;
  }
  return RET_OK;
}

int ConvolutionBaseCoreMLOp::SetConvBias() {
  if (in_tensors_.size() >= kInputSize2) {
    auto bias_tensor = in_tensors_.at(kBiasIndex);
    auto org_bias = bias_tensor.Data().get();
    conv_param_->set_hasbias(true);
    if (bias_tensor.DataType() == DataType::kNumberTypeFloat32) {
      auto *ml_bias_container = conv_param_->mutable_bias()->mutable_floatvalue();
      ml_bias_container->Resize(bias_tensor.ElementNum(), 0);
      auto *ml_bias = reinterpret_cast<void *>(ml_bias_container->mutable_data());
      memcpy(ml_bias, org_bias, bias_tensor.DataSize());
    } else {
      MS_LOG(ERROR) << "Unsupported data type of bias tensor for CoreML convolution.";
      return RET_ERROR;
    }
  }
  return RET_OK;
}

int ConvolutionBaseCoreMLOp::BuildLayer() {
  MS_ASSERT(op_ != nullptr);
  conv_param_ = op_->mutable_convolution();
  auto ret = SetConvParam();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Set conv param failed for op: " << name_;
    return RET_ERROR;
  }
  ret = SetConvWeight();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Set conv weight failed for op: " << name_;
    return RET_ERROR;
  }
  ret = SetConvBias();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Set conv bias failed for op: " << name_;
    return RET_ERROR;
  }
  if (act_type_ != schema::ActivationType_NO_ACTIVATION) {
    ret = SetActivation(act_type_);
    if (ret != RET_OK) {
      MS_LOG(ERROR) << "Set conv activation failed for op: " << name_;
      return RET_ERROR;
    }
  }
  return RET_OK;
}
}  // namespace mindspore
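SetConvWeight relies on lite::PackNHWCToNCHWFp32 with batch = c_out, plane = h * w and channel = c_in to realize the (c_out, h, w, c_in) -> (c_out, c_in, h, w) weight transpose. Written out as plain loops, the equivalent transform would be (sketch only; the helper name is hypothetical):

void TransposeWeightOHWI2OIHW(const float *src, float *dst, int c_out, int plane, int c_in) {
  for (int o = 0; o < c_out; ++o) {
    for (int p = 0; p < plane; ++p) {
      for (int i = 0; i < c_in; ++i) {
        // src is laid out (c_out, h*w, c_in); dst is (c_out, c_in, h*w)
        dst[(o * c_in + i) * plane + p] = src[(o * plane + p) * c_in + i];
      }
    }
  }
}
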
@@ -0,0 +1,61 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_CONVOLUTION_BASE_COREML_H_
#define MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_CONVOLUTION_BASE_COREML_H_

#include <vector>
#include <string>
#include <utility>
#include <memory>
#include <unordered_map>
#include "src/runtime/delegate/coreml/op/coreml_op.h"
namespace mindspore {
class ConvolutionBaseCoreMLOp : public CoreMLOp {
 public:
  ConvolutionBaseCoreMLOp(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
                          const std::vector<mindspore::MSTensor> &out_tensors, std::string name)
      : CoreMLOp(primitive, in_tensors, out_tensors, name) {
    input_h_ = static_cast<int>(in_tensors.at(0).Shape().at(kNHWC_H));
    input_w_ = static_cast<int>(in_tensors.at(0).Shape().at(kNHWC_W));
    kernel_h_ = static_cast<int>(in_tensors.at(1).Shape().at(MS_WT_H));
    kernel_w_ = static_cast<int>(in_tensors.at(1).Shape().at(MS_WT_W));
    output_h_ = static_cast<int>(out_tensors.at(0).Shape().at(kNHWC_H));
    output_w_ = static_cast<int>(out_tensors.at(0).Shape().at(kNHWC_W));
  }

  int BuildLayer() override;

 protected:
  virtual int SetConvParam() { return RET_OK; }

  virtual int SetConvWeight();

  virtual int SetConvBias();

 protected:
  int input_h_;
  int input_w_;
  int kernel_h_;
  int kernel_w_;
  int output_h_;
  int output_w_;
  CoreML::Specification::ConvolutionLayerParams *conv_param_ = nullptr;
  schema::ActivationType act_type_ = schema::ActivationType_NO_ACTIVATION;
  std::unique_ptr<CoreML::Specification::NeuralNetworkLayer> trans_in_op_ = nullptr;
  std::unique_ptr<CoreML::Specification::NeuralNetworkLayer> trans_out_op_ = nullptr;
};
}  // namespace mindspore
#endif  // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_CONVOLUTION_BASE_COREML_H_
@@ -0,0 +1,71 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/runtime/delegate/coreml/op/convolution_coreml.h"
#include <cmath>
#include "src/runtime/delegate/delegate_utils.h"
namespace mindspore {
int ConvolutionCoreMLOp::IsSupport() {
  if (!in_tensors_[kWeightIndex].IsConst()) {
    MS_LOG(WARNING) << "CoreML convolution does not support dynamic weight.";
    return RET_NOT_SUPPORT;
  }
  conv_prim_ = op_primitive_->value_as_Conv2DFusion();
  if (conv_prim_ == nullptr) {
    MS_LOG(ERROR) << "Get null primitive value for op: " << name_;
    return RET_ERROR;
  }
  CHECK_NULL_RETURN(conv_prim_->stride());
  stride_h_ = static_cast<int>(*(conv_prim_->stride()->begin()));
  stride_w_ = static_cast<int>(*(conv_prim_->stride()->begin() + 1));
  CHECK_NULL_RETURN(conv_prim_->dilation());
  dilation_h_ = static_cast<int>(*(conv_prim_->dilation()->begin()));
  dilation_w_ = static_cast<int>(*(conv_prim_->dilation()->begin() + 1));
  // the original conv format is NHWC
  if (stride_h_ > in_tensors_[0].Shape()[kNHWC_H] || stride_w_ > in_tensors_[0].Shape()[kNHWC_W]) {
    MS_LOG(WARNING) << "CoreML convolution does not support stride greater than input size.";
    return RET_NOT_SUPPORT;
  }
  return RET_OK;
}

int ConvolutionCoreMLOp::SetConvParam() {
  auto group = static_cast<int>(conv_prim_->group());
  conv_param_->set_ngroups(group);
  conv_param_->add_stride(stride_h_);
  conv_param_->add_stride(stride_w_);
  conv_param_->add_dilationfactor(dilation_h_);
  conv_param_->add_dilationfactor(dilation_w_);
  if (conv_prim_->pad_mode() == schema::PadMode_SAME) {
    conv_param_->mutable_same();
  } else {
    conv_param_->mutable_valid();
    if (conv_prim_->pad_list() != nullptr) {
      auto pad_u = static_cast<int>(*(conv_prim_->pad_list()->begin() + PAD_UP));
      auto pad_d = static_cast<int>(*(conv_prim_->pad_list()->begin() + PAD_DOWN));
      auto pad_l = static_cast<int>(*(conv_prim_->pad_list()->begin() + PAD_LEFT));
      auto pad_r = static_cast<int>(*(conv_prim_->pad_list()->begin() + PAD_RIGHT));
      auto ret = SetPadding({pad_u, pad_d, pad_l, pad_r});
      if (ret != RET_OK) {
        MS_LOG(ERROR) << "Fail to set padding for op: " << name_;
        return RET_ERROR;
      }
    }
  }
  act_type_ = conv_prim_->activation_type();
  return RET_OK;
}
}  // namespace mindspore
@@ -0,0 +1,46 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_CONVOLUTION_COREML_H_
#define MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_CONVOLUTION_COREML_H_

#include <vector>
#include <string>
#include <utility>
#include <unordered_map>
#include "src/runtime/delegate/coreml/op/convolution_base_coreml.h"
namespace mindspore {
class ConvolutionCoreMLOp : public ConvolutionBaseCoreMLOp {
 public:
  ConvolutionCoreMLOp(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
                      const std::vector<mindspore::MSTensor> &out_tensors, std::string name)
      : ConvolutionBaseCoreMLOp(primitive, in_tensors, out_tensors, name) {}

  int IsSupport() override;

 private:
  schema::PadMode GetPadMode();

  int SetConvParam() override;

 private:
  int stride_h_;
  int stride_w_;
  int dilation_h_;
  int dilation_w_;
  const schema::Conv2DFusion *conv_prim_ = nullptr;
};
}  // namespace mindspore
#endif  // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_CONVOLUTION_COREML_H_
@@ -0,0 +1,155 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/runtime/delegate/coreml/op/coreml_op.h"
#include "nnacl/base/cast_base.h"
namespace mindspore {
int CoreMLOp::Init() {
  auto ret = InitParams();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "CoreML op " << name_ << "'s parameter initialization failed.";
    return RET_ERROR;
  }
  op_ = std::make_unique<CoreML::Specification::NeuralNetworkLayer>();
  if (op_ == nullptr) {
    MS_LOG(ERROR) << "New CoreML op " << name_ << " failed.";
    return RET_ERROR;
  }
  op_->set_name("CoreML_" + name_);
  return RET_OK;
}

int CoreMLOp::SetActivation(schema::ActivationType act_type) {
  act_op_ = std::make_unique<CoreML::Specification::NeuralNetworkLayer>();
  if (act_op_ == nullptr) {
    MS_LOG(ERROR) << "New CoreML op " << name_ << "_activation failed.";
    return RET_ERROR;
  }
  act_op_->set_name("CoreML_" + name_ + "_activation");
  switch (act_type) {
    case schema::ActivationType_RELU:
      act_op_->mutable_activation()->mutable_relu();
      break;
    case schema::ActivationType_RELU6: {
      auto clip_param = act_op_->mutable_clip();
      clip_param->set_minval(0);
      clip_param->set_maxval(kValueThreshold6);
      break;
    }
    case schema::ActivationType_TANH:
      act_op_->mutable_activation()->mutable_tanh();
      break;
    case schema::ActivationType_SIGMOID:
      act_op_->mutable_activation()->mutable_sigmoid();
      break;
    default:
      MS_LOG(ERROR) << "Unsupported activation type.";
      return RET_ERROR;
  }
  return RET_OK;
}

int CoreMLOp::SetPadding(std::vector<int> pad_list) {
  pad_op_ = std::make_unique<CoreML::Specification::NeuralNetworkLayer>();
  if (pad_op_ == nullptr) {
    MS_LOG(ERROR) << "New CoreML op " << name_ << "_pad failed.";
    return RET_ERROR;
  }
  pad_op_->set_name("CoreML_" + name_ + "_pad");
  auto pad_param = pad_op_->mutable_padding();
  pad_param->mutable_constant();
  auto height_border = pad_param->mutable_paddingamounts()->add_borderamounts();
  auto width_border = pad_param->mutable_paddingamounts()->add_borderamounts();
  height_border->set_startedgesize(pad_list[PAD_UP]);
  height_border->set_endedgesize(pad_list[PAD_DOWN]);
  width_border->set_startedgesize(pad_list[PAD_LEFT]);
  width_border->set_endedgesize(pad_list[PAD_RIGHT]);
  return RET_OK;
}

int CoreMLOp::SetConstInput(const mindspore::MSTensor &in_tensor) {
  MS_CHECK_TRUE_MSG(in_tensor.IsConst(), RET_ERROR, "Only a constant tensor can be set as a CoreML Const op.");
  std::string const_op_name = "CoreML_" + in_tensor.Name() + "_const";
  auto const_op = std::make_unique<CoreML::Specification::NeuralNetworkLayer>();
  if (const_op == nullptr) {
    MS_LOG(ERROR) << "New CoreML const op " << const_op_name << " for op " << name_ << " failed.";
    return RET_ERROR;
  }
  const_op->set_name(const_op_name);
  auto const_param = const_op->mutable_loadconstantnd();
  for (auto i : in_tensor.Shape()) {
    const_param->add_shape(static_cast<uint64_t>(i));
  }
  if (in_tensor.Shape().empty()) {
    const_param->add_shape(1);
  }
  // set const data
  auto org_data = in_tensor.Data().get();
  auto *ml_data_container = const_param->mutable_data()->mutable_floatvalue();
  ml_data_container->Resize(in_tensor.ElementNum(), 0);
  auto *ml_data = reinterpret_cast<float *>(ml_data_container->mutable_data());
  if (in_tensor.DataType() == DataType::kNumberTypeInt32) {
    Int32ToFloat32(reinterpret_cast<const int *>(org_data), ml_data, in_tensor.ElementNum());
  } else if (in_tensor.DataType() == DataType::kNumberTypeFloat32) {
    memcpy(ml_data, org_data, in_tensor.DataSize());
  } else {
    MS_LOG(ERROR) << "Unsupported const input data type: " << static_cast<int>(in_tensor.DataType());
    return RET_ERROR;
  }
  const_ops_[in_tensor.Name()] = std::move(const_op);
  return RET_OK;
}

void CoreMLOp::SetMLOpInOut() {
  MS_ASSERT(op_ != nullptr);
  auto input_name = in_tensors_.at(0).Name();
  if (pad_op_ != nullptr) {
    std::string pad_name = op_->name() + "_pad_0";
    pad_op_->add_input(input_name);
    pad_op_->add_output(pad_name);
    op_->add_input(pad_name);
  } else {
    op_->add_input(input_name);
  }
  auto output_name = out_tensors_.at(0).Name();
  if (act_op_ != nullptr) {
    std::string act_name = op_->name() + "_act_0";
    op_->add_output(act_name);
    act_op_->add_input(act_name);
    act_op_->add_output(output_name);
  } else {
    op_->add_output(output_name);
  }
}

std::vector<CoreML::Specification::NeuralNetworkLayer *> CoreMLOp::GetLayers() {
  MS_ASSERT(op_ != nullptr);
  std::vector<CoreML::Specification::NeuralNetworkLayer *> ret_ops;
  if (pad_op_ != nullptr) {
    ret_ops.push_back(pad_op_.release());
  }
  if (!const_ops_.empty()) {
    for (auto it = const_ops_.begin(); it != const_ops_.end(); it++) {
      ret_ops.push_back(it->second.release());
    }
  }
  ret_ops.push_back(op_.release());
  if (act_op_ != nullptr) {
    ret_ops.push_back(act_op_.release());
  }
  return ret_ops;
}
}  // namespace mindspore
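SetMLOpInOut and GetLayers together fix both the tensor-name wiring and the layer emission order. When both a pad and an activation helper layer exist, the chain built above is, schematically (intermediate names derived from op_->name()):

//   input -> pad_op_ -> "<op>_pad_0" -> op_ -> "<op>_act_0" -> act_op_ -> output
// GetLayers then releases ownership in the same order: pad, const layers, the main op, activation.
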
@@ -0,0 +1,155 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_COREML_OP_
#define MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_COREML_OP_
#include <utility>
#include <vector>
#include <string>
#include <set>
#include <memory>
#include <unordered_map>
#include "proto/Model.pb.h"
#include "proto/NeuralNetwork.pb.h"
#include "schema/model_generated.h"
#include "include/errorcode.h"
#include "include/api/types.h"
#include "include/api/data_type.h"
#include "src/common/log_adapter.h"
#include "src/common/log_util.h"
#include "nnacl/op_base.h"
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_NOT_SUPPORT;
using mindspore::lite::RET_OK;
namespace mindspore {
inline const std::vector<int> NHWC2NCHW_PERM = {0, 3, 1, 2};
inline const std::vector<int> NCHW2NHWC_PERM = {0, 2, 3, 1};
enum COREML_WEIGHT_SHAPE { ML_WT_COUT = 0, ML_WT_CIN = 1, ML_WT_H = 2, ML_WT_W = 3 };
enum MSLITE_WEIGHT_SHAPE { MS_WT_COUT = 0, MS_WT_H = 1, MS_WT_W = 2, MS_WT_CIN = 3 };
enum PAD { PAD_UP = 0, PAD_DOWN = 1, PAD_LEFT = 2, PAD_RIGHT = 3 };
constexpr int REPEAT_TIMES2 = 2;
class CoreMLOp {
 public:
  CoreMLOp(const schema::Primitive *primitive, std::vector<mindspore::MSTensor> in_tensors,
           std::vector<mindspore::MSTensor> out_tensors, std::string name)
      : op_primitive_(primitive),
        in_tensors_(std::move(in_tensors)),
        out_tensors_(std::move(out_tensors)),
        name_(std::move(name)) {
    if (primitive != nullptr) {
      type_ = primitive->value_type();
    }
  }

  // the op will be managed by the CoreML model; there is no need to destruct it manually
  virtual ~CoreMLOp() = default;

  virtual int IsSupport() { return RET_OK; }

  virtual int Init();

  virtual int InitParams() { return RET_OK; }

  virtual int HandleAxis() { return RET_OK; }

  virtual int BuildLayer() { return RET_OK; }

  // Override this method if the op has tensors that do not need to be added to the graph, e.g., const tensors.
  virtual void SetMLOpInOut();

  // Transfer the ownership of the op to the CoreML model. Multiple layers may be built for one op, so a
  // vector is returned.
  virtual std::vector<CoreML::Specification::NeuralNetworkLayer *> GetLayers();

  virtual int SetActivation(schema::ActivationType act_type);

  virtual int SetPadding(std::vector<int> pad_list);

  virtual int SetConstInput(const mindspore::MSTensor &in_tensor);

  void set_inputs(const std::vector<mindspore::MSTensor> &in_tensors) { this->in_tensors_ = in_tensors; }

  void set_input(const mindspore::MSTensor &in_tensor, int index) {
    MS_ASSERT(static_cast<size_t>(index) < in_tensors_.size());
    this->in_tensors_[index] = in_tensor;
  }

  void set_outputs(const std::vector<mindspore::MSTensor> &out_tensors) { this->out_tensors_ = out_tensors; }

  const std::vector<mindspore::MSTensor> &inputs() { return this->in_tensors_; }

  const std::vector<mindspore::MSTensor> &outputs() { return this->out_tensors_; }

  void set_in_ops(const std::vector<CoreMLOp *> &in_ops) { this->in_ops_ = in_ops; }

  void set_out_ops(const std::vector<CoreMLOp *> &out_ops) { this->out_ops_ = out_ops; }

  const std::vector<CoreMLOp *> &in_ops() const { return this->in_ops_; }

  const std::vector<CoreMLOp *> &out_ops() const { return this->out_ops_; }

  schema::PrimitiveType type() const { return type_; }

  std::string name() const { return this->name_; }

  void set_name(const std::string &name) { this->name_ = name; }

 protected:
  const schema::Primitive *op_primitive_ = nullptr;
  std::vector<mindspore::MSTensor> in_tensors_;
  std::vector<mindspore::MSTensor> out_tensors_;
  std::vector<CoreMLOp *> in_ops_;
  std::vector<CoreMLOp *> out_ops_;
  schema::PrimitiveType type_ = schema::PrimitiveType_NONE;
  std::string name_;
  std::unique_ptr<CoreML::Specification::NeuralNetworkLayer> op_ = nullptr;
  std::unique_ptr<CoreML::Specification::NeuralNetworkLayer> pad_op_ = nullptr;
  std::unique_ptr<CoreML::Specification::NeuralNetworkLayer> act_op_ = nullptr;
  std::unordered_map<std::string, std::unique_ptr<CoreML::Specification::NeuralNetworkLayer>> const_ops_ = {};
};

typedef CoreMLOp *(*CoreMLGetOp)(const schema::Primitive *primitive,
                                 const std::vector<mindspore::MSTensor> &in_tensors,
                                 const std::vector<mindspore::MSTensor> &out_tensors, const std::string &name);

template <class T>
CoreMLOp *GetCoreMLOp(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
                      const std::vector<mindspore::MSTensor> &out_tensors, const std::string &name) {
  auto shape = out_tensors.front().Shape();
  if (std::find(shape.begin(), shape.end(), -1) != shape.end()) {
    MS_LOG(ERROR) << "CoreML does not support shapes inferred at runtime.";
    return nullptr;
  }
  auto *op = new (std::nothrow) T(primitive, in_tensors, out_tensors, name);
  if (op == nullptr) {
    MS_LOG(ERROR) << "op is nullptr.";
    return nullptr;
  }
  auto ret = op->IsSupport();
  if (ret != RET_OK) {
    MS_LOG(WARNING) << "CoreML op is not supported.";
    delete op;
    return nullptr;
  }
  ret = op->Init();
  if (ret != RET_OK) {
    MS_LOG(WARNING) << "CoreML op init failed.";
    delete op;
    return nullptr;
  }
  return op;
}
}  // namespace mindspore
#endif  // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_COREML_OP_
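GetCoreMLOp is the factory entry point: it rejects ops with dynamic (-1) output dimensions, then runs IsSupport and Init before handing the op back. A hypothetical call site (the real dispatch table lives elsewhere in this commit; the names below are illustrative):

auto *op = GetCoreMLOp<ConcatCoreMLOp>(primitive, in_tensors, out_tensors, "concat_0");
if (op == nullptr) {
  // unsupported shape or attributes, or Init failed: leave this node to the default runtime
}
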
@@ -0,0 +1,70 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/runtime/delegate/coreml/op/deconvolution_coreml.h"
#include "src/runtime/delegate/delegate_utils.h"
namespace mindspore {
int DeconvolutionCoreMLOp::IsSupport() {
  if (!in_tensors_[kWeightIndex].IsConst()) {
    MS_LOG(WARNING) << "CoreML deconvolution does not support dynamic weight.";
    return RET_NOT_SUPPORT;
  }
  deconv_prim_ = op_primitive_->value_as_Conv2dTransposeFusion();
  if (deconv_prim_ == nullptr) {
    MS_LOG(ERROR) << "Get null primitive value for op: " << name_;
    return RET_ERROR;
  }
  if (static_cast<int>(deconv_prim_->group()) != 1) {
    MS_LOG(WARNING) << "Only group equal to 1 is supported for the CoreML deconvolution op.";
    return RET_NOT_SUPPORT;
  }
  return RET_OK;
}

int DeconvolutionCoreMLOp::SetConvParam() {
  conv_param_->set_isdeconvolution(true);
  CHECK_NULL_RETURN(deconv_prim_->stride());
  auto stride_h = static_cast<int>(*(deconv_prim_->stride()->begin()));
  auto stride_w = static_cast<int>(*(deconv_prim_->stride()->begin() + 1));
  conv_param_->add_stride(stride_h);
  conv_param_->add_stride(stride_w);
  CHECK_NULL_RETURN(deconv_prim_->dilation());
  auto dilation_h = static_cast<int>(*(deconv_prim_->dilation()->begin()));
  auto dilation_w = static_cast<int>(*(deconv_prim_->dilation()->begin() + 1));
  conv_param_->add_dilationfactor(dilation_h);
  conv_param_->add_dilationfactor(dilation_w);
  conv_param_->add_outputshape(output_h_);
  conv_param_->add_outputshape(output_w_);
  if (deconv_prim_->pad_mode() == schema::PadMode_SAME) {
    conv_param_->mutable_same();
  } else {
    conv_param_->mutable_valid();
    if (deconv_prim_->pad_list() != nullptr) {
      auto pad_u = static_cast<int>(*(deconv_prim_->pad_list()->begin() + PAD_UP));
      auto pad_d = static_cast<int>(*(deconv_prim_->pad_list()->begin() + PAD_DOWN));
      auto pad_l = static_cast<int>(*(deconv_prim_->pad_list()->begin() + PAD_LEFT));
      auto pad_r = static_cast<int>(*(deconv_prim_->pad_list()->begin() + PAD_RIGHT));
      auto ret = SetPadding({pad_u, pad_d, pad_l, pad_r});
      if (ret != RET_OK) {
        MS_LOG(ERROR) << "Fail to set padding for op: " << name_;
        return RET_ERROR;
      }
    }
  }
  act_type_ = deconv_prim_->activation_type();
  return RET_OK;
}
}  // namespace mindspore
@@ -0,0 +1,42 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_DECONVOLUTION_COREML_H_
#define MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_DECONVOLUTION_COREML_H_

#include <vector>
#include <string>
#include <utility>
#include <unordered_map>
#include "src/runtime/delegate/coreml/op/convolution_base_coreml.h"
namespace mindspore {
class DeconvolutionCoreMLOp : public ConvolutionBaseCoreMLOp {
 public:
  DeconvolutionCoreMLOp(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
                        const std::vector<mindspore::MSTensor> &out_tensors, std::string name)
      : ConvolutionBaseCoreMLOp(primitive, in_tensors, out_tensors, name) {}

  int IsSupport() override;

 private:
  schema::PadMode GetPadMode();

  int SetConvParam() override;

 private:
  const schema::Conv2dTransposeFusion *deconv_prim_ = nullptr;
};
}  // namespace mindspore
#endif  // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_DECONVOLUTION_COREML_H_
@@ -0,0 +1,24 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/runtime/delegate/coreml/op/flatten_coreml.h"
namespace mindspore {
int FlattenCoreMLOp::BuildLayer() {
  MS_ASSERT(op_ != nullptr);
  (void)op_->mutable_flattento2d();
  return RET_OK;
}
}  // namespace mindspore
@@ -0,0 +1,32 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_FLATTEN_COREML_H_
#define MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_FLATTEN_COREML_H_

#include <vector>
#include <string>
#include "src/runtime/delegate/coreml/op/coreml_op.h"
namespace mindspore {
class FlattenCoreMLOp : public CoreMLOp {
 public:
  FlattenCoreMLOp(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
                  const std::vector<mindspore::MSTensor> &out_tensors, std::string name)
      : CoreMLOp(primitive, in_tensors, out_tensors, name) {}

  int BuildLayer() override;
};
}  // namespace mindspore
#endif  // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_FLATTEN_COREML_H_
@@ -0,0 +1,51 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/runtime/delegate/coreml/op/gather_coreml.h"
namespace mindspore {
int GatherCoreMLOp::IsSupport() {
  MS_CHECK_GE(in_tensors_.size(), kInputSize2, RET_NOT_SUPPORT);
  return RET_OK;
}

int GatherCoreMLOp::BuildLayer() {
  MS_ASSERT(op_ != nullptr);
  auto gather_params = op_->mutable_gather();
  CHECK_NULL_RETURN(in_tensors_[THIRD_INPUT].Data());
  auto axis_data = reinterpret_cast<const int *>(in_tensors_[THIRD_INPUT].Data().get());
  gather_params->set_axis(axis_data[0]);
  auto indices_tensor = in_tensors_[SECOND_INPUT];
  if (indices_tensor.IsConst()) {
    auto ret = SetConstInput(indices_tensor);
    if (ret != RET_OK) {
      MS_LOG(ERROR) << "Set const input failed for op: " << name_;
      return RET_ERROR;
    }
  }
  return RET_OK;
}

void GatherCoreMLOp::SetMLOpInOut() {
  MS_ASSERT(op_ != nullptr);
  op_->add_input(in_tensors_[FIRST_INPUT].Name());
  auto indices_tensor = in_tensors_[SECOND_INPUT];
  if (indices_tensor.IsConst()) {
    const_ops_[indices_tensor.Name()]->add_output(indices_tensor.Name());
  }
  op_->add_input(indices_tensor.Name());
  op_->add_output(out_tensors_[0].Name());
}
}  // namespace mindspore
@@ -0,0 +1,36 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_GATHER_COREML_H_
#define MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_GATHER_COREML_H_

#include <vector>
#include <string>
#include "src/runtime/delegate/coreml/op/coreml_op.h"
namespace mindspore {
class GatherCoreMLOp : public CoreMLOp {
 public:
  GatherCoreMLOp(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
                 const std::vector<mindspore::MSTensor> &out_tensors, std::string name)
      : CoreMLOp(primitive, in_tensors, out_tensors, name) {}

  int IsSupport() override;

  int BuildLayer() override;

  void SetMLOpInOut() override;
};
}  // namespace mindspore
#endif  // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_GATHER_COREML_H_
@@ -0,0 +1,184 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/runtime/delegate/coreml/op/matmul_coreml.h"
namespace mindspore {
int MatMulCoreMLOp::IsSupport() {
  MS_CHECK_GE(in_tensors_.size(), kInputSize1, RET_NOT_SUPPORT);
  if (in_tensors_.size() > kInputSize1 && !in_tensors_.at(SECOND_INPUT).IsConst()) {
    MS_LOG(WARNING) << "Bias for CoreML matmul is supported only when the second input is a constant.";
    return RET_NOT_SUPPORT;
  }
  return RET_OK;
}

int MatMulCoreMLOp::InitParams() {
  matmul_prim_ = op_primitive_->value_as_MatMulFusion();
  if (matmul_prim_ == nullptr) {
    MS_LOG(ERROR) << "Get null primitive value for op: " << name_;
    return RET_ERROR;
  }
  return RET_OK;
}

int MatMulCoreMLOp::BuildLayer() {
  MS_ASSERT(op_ != nullptr);
  matmul_param_ = op_->mutable_batchedmatmul();
  matmul_param_->set_transposea(matmul_prim_->transpose_a());
  matmul_param_->set_transposeb(matmul_prim_->transpose_b());
  if (in_tensors_.at(SECOND_INPUT).IsConst()) {
    if (matmul_prim_->transpose_b()) {
      auto ret = ConstMatMulWithTransB();
      if (ret != RET_OK) {
        MS_LOG(ERROR) << "Build MatMul layer with const input and true TransposeB failed for op: " << name_;
        return RET_ERROR;
      }
    } else {
      // CoreML will automatically transpose the const input even though transposeB is false.
      auto ret = ConstMatMulWithoutTransB();
      if (ret != RET_OK) {
        MS_LOG(ERROR) << "Build MatMul layer with const input and false TransposeB failed for op: " << name_;
        return RET_ERROR;
      }
    }
  }
  auto act_type = matmul_prim_->activation_type();
  if (act_type != schema::ActivationType_NO_ACTIVATION) {
    auto ret = SetActivation(act_type);
    if (ret != RET_OK) {
      MS_LOG(ERROR) << "Set matmul activation failed for op: " << name_;
      return RET_ERROR;
    }
  }
  return RET_OK;
}

int MatMulCoreMLOp::ConstMatMulWithTransB() {
  MS_ASSERT(matmul_param_ != nullptr);
  auto input_b = in_tensors_.at(SECOND_INPUT);
  auto dim_b = input_b.Shape().size();
  int64_t in_channel =
    matmul_prim_->transpose_b() ? input_b.Shape()[dim_b - DIMENSION_1D] : input_b.Shape()[dim_b - DIMENSION_2D];
  int64_t out_channel =
    matmul_prim_->transpose_b() ? input_b.Shape()[dim_b - DIMENSION_2D] : input_b.Shape()[dim_b - DIMENSION_1D];
  matmul_param_->set_weightmatrixfirstdimension(in_channel);
  matmul_param_->set_weightmatrixseconddimension(out_channel);
  auto org_weight = input_b.Data().get();
  if (input_b.DataType() == DataType::kNumberTypeFloat32) {
    auto *ml_weight_container = matmul_param_->mutable_weights()->mutable_floatvalue();
    ml_weight_container->Resize(input_b.ElementNum(), 0);
    auto *ml_weight = reinterpret_cast<void *>(ml_weight_container->mutable_data());
    memcpy(ml_weight, org_weight, input_b.DataSize());
  } else {
    MS_LOG(ERROR) << "Unsupported data type of weight tensor for CoreML matmul.";
    return RET_ERROR;
  }
  if (in_tensors_.size() > kInputSize1) {
    auto bias_tensor = in_tensors_.at(THIRD_INPUT);
    auto org_bias = bias_tensor.Data().get();
    matmul_param_->set_hasbias(true);
    if (bias_tensor.DataType() == DataType::kNumberTypeFloat32) {
      auto *ml_bias_container = matmul_param_->mutable_bias()->mutable_floatvalue();
      ml_bias_container->Resize(bias_tensor.ElementNum(), 0);
      auto *ml_bias = reinterpret_cast<void *>(ml_bias_container->mutable_data());
      memcpy(ml_bias, org_bias, bias_tensor.DataSize());
    } else {
      MS_LOG(ERROR) << "Unsupported data type of bias tensor for CoreML matmul.";
      return RET_ERROR;
    }
  }
  return RET_OK;
}

int MatMulCoreMLOp::ConstMatMulWithoutTransB() {
  MS_ASSERT(matmul_param_ != nullptr);
  auto ret = SetConstInput(in_tensors_[SECOND_INPUT]);
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Set const input failed for op: " << name_;
    return RET_ERROR;
  }
  if (in_tensors_.size() > kInputSize1) {
    // the second input is not set as the layer's const weights here, so the built-in bias param is
    // invalid; add the bias with a separate broadcastable add layer instead.
    bias_op_ = std::make_unique<CoreML::Specification::NeuralNetworkLayer>();
    if (bias_op_ == nullptr) {
      MS_LOG(ERROR) << "New CoreML op " << name_ << "_bias failed.";
      return RET_ERROR;
    }
    bias_op_->set_name("CoreML_" + name_ + "_bias");
    (void)bias_op_->mutable_addbroadcastable();
    ret = SetConstInput(in_tensors_[THIRD_INPUT]);
    if (ret != RET_OK) {
      MS_LOG(ERROR) << "Set const input failed for op: " << name_;
      return RET_ERROR;
    }
  }
  return RET_OK;
}

void MatMulCoreMLOp::SetMLOpInOut() {
  MS_ASSERT(op_ != nullptr);
  op_->add_input(in_tensors_.at(FIRST_INPUT).Name());
  auto input_b_name = in_tensors_.at(SECOND_INPUT).Name();
  auto output_name = out_tensors_.at(0).Name();
  if (!in_tensors_.at(SECOND_INPUT).IsConst()) {
    op_->add_input(input_b_name);
  } else if (!const_ops_.empty()) {
    const_ops_[input_b_name]->add_output(input_b_name);
    op_->add_input(input_b_name);
    if (bias_op_ != nullptr) {
      std::string bias_name = op_->name() + "_bias_0";
      op_->add_output(bias_name);
      bias_op_->add_input(bias_name);
      auto input_c_name = in_tensors_.at(THIRD_INPUT).Name();
      const_ops_[input_c_name]->add_output(input_c_name);
      bias_op_->add_input(input_c_name);
    }
  }
  if (act_op_ != nullptr) {
    std::string act_name = op_->name() + "_act_0";
    if (bias_op_ != nullptr) {
      bias_op_->add_output(act_name);
    } else {
      op_->add_output(act_name);
    }
    act_op_->add_input(act_name);
    act_op_->add_output(output_name);
    return;
  }
  if (bias_op_ != nullptr) {
    bias_op_->add_output(output_name);
  } else {
    op_->add_output(output_name);
  }
}

std::vector<CoreML::Specification::NeuralNetworkLayer *> MatMulCoreMLOp::GetLayers() {
  MS_ASSERT(op_ != nullptr);
  std::vector<CoreML::Specification::NeuralNetworkLayer *> ret_ops;
  for (auto it = const_ops_.begin(); it != const_ops_.end(); it++) {
    ret_ops.push_back(it->second.release());
  }
  ret_ops.push_back(op_.release());
  if (bias_op_ != nullptr) {
    ret_ops.push_back(bias_op_.release());
  }
  if (act_op_ != nullptr) {
    ret_ops.push_back(act_op_.release());
  }
  return ret_ops;
}
}  // namespace mindspore
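For a const second input without transpose_b, the code above therefore emits up to four chained layers, wired by name (sketch; the bias stage exists only when a third input is present):

//   A, const(B) -> batchedmatmul -> "<op>_bias_0" -> addbroadcastable (+ const(C)) -> [optional activation] -> output
// GetLayers releases the layers in that order: const layers, matmul, bias, activation.
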
@@ -0,0 +1,50 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_MATMUL_COREML_H_
#define MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_MATMUL_COREML_H_

#include <vector>
#include <string>
#include <memory>
#include "src/runtime/delegate/coreml/op/coreml_op.h"
namespace mindspore {
class MatMulCoreMLOp : public CoreMLOp {
 public:
  MatMulCoreMLOp(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
                 const std::vector<mindspore::MSTensor> &out_tensors, std::string name)
      : CoreMLOp(primitive, in_tensors, out_tensors, name) {}

  int IsSupport() override;

  int InitParams() override;

  int BuildLayer() override;

  std::vector<CoreML::Specification::NeuralNetworkLayer *> GetLayers() override;

  void SetMLOpInOut() override;

  int ConstMatMulWithTransB();

  int ConstMatMulWithoutTransB();

 protected:
  const schema::MatMulFusion *matmul_prim_ = nullptr;
  CoreML::Specification::BatchedMatMulLayerParams *matmul_param_ = nullptr;
  std::unique_ptr<CoreML::Specification::NeuralNetworkLayer> bias_op_ = nullptr;
};
}  // namespace mindspore
#endif  // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_MATMUL_COREML_H_
@@ -0,0 +1,71 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/runtime/delegate/coreml/op/max_pooling_coreml.h"
namespace mindspore {
int MaxPoolingCoreMLOp::InitParams() {
  pooling_prim_ = op_primitive_->value_as_MaxPoolFusion();
  if (pooling_prim_ == nullptr) {
    MS_LOG(ERROR) << "Get null primitive value for op: " << name_;
    return RET_ERROR;
  }
  return RET_OK;
}

int MaxPoolingCoreMLOp::BuildLayer() {
  MS_ASSERT(op_ != nullptr);
  auto pooling_param = op_->mutable_pooling();
  pooling_param->set_type(CoreML::Specification::PoolingLayerParams::MAX);
  if (pooling_prim_->global()) {
    pooling_param->set_globalpooling(true);
    pooling_param->mutable_valid();
    return RET_OK;
  }
  auto kernel_h = static_cast<int>(*(pooling_prim_->kernel_size()->begin()));
  auto kernel_w = static_cast<int>(*(pooling_prim_->kernel_size()->begin() + 1));
  auto stride_h = static_cast<int>(*(pooling_prim_->strides()->begin()));
  auto stride_w = static_cast<int>(*(pooling_prim_->strides()->begin() + 1));
  pooling_param->add_stride(stride_h);
  pooling_param->add_stride(stride_w);
  pooling_param->add_kernelsize(kernel_h);
  pooling_param->add_kernelsize(kernel_w);
  if (pooling_prim_->pad_mode() == schema::PadMode_SAME) {
    pooling_param->mutable_same();
  } else {
    pooling_param->mutable_valid();
    if (pooling_prim_->pad() != nullptr) {
      auto pad_u = static_cast<int>(*(pooling_prim_->pad()->begin() + PAD_UP));
      auto pad_d = static_cast<int>(*(pooling_prim_->pad()->begin() + PAD_DOWN));
      auto pad_l = static_cast<int>(*(pooling_prim_->pad()->begin() + PAD_LEFT));
      auto pad_r = static_cast<int>(*(pooling_prim_->pad()->begin() + PAD_RIGHT));
      auto ret = SetPadding({pad_u, pad_d, pad_l, pad_r});
      if (ret != RET_OK) {
        MS_LOG(ERROR) << "Fail to set padding for op: " << name_;
        return RET_ERROR;
      }
    }
  }
  auto act_type = pooling_prim_->activation_type();
  if (act_type != schema::ActivationType_NO_ACTIVATION) {
    auto ret = SetActivation(act_type);
    if (ret != RET_OK) {
      MS_LOG(ERROR) << "Set pooling activation failed for op: " << name_;
      return RET_ERROR;
    }
  }
  return RET_OK;
}
}  // namespace mindspore
@@ -0,0 +1,39 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_MAX_POOLING_COREML_H_
#define MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_MAX_POOLING_COREML_H_

#include <vector>
#include <string>
#include <utility>
#include <unordered_map>
#include "src/runtime/delegate/coreml/op/coreml_op.h"
namespace mindspore {
class MaxPoolingCoreMLOp : public CoreMLOp {
 public:
  MaxPoolingCoreMLOp(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
                     const std::vector<mindspore::MSTensor> &out_tensors, std::string name)
      : CoreMLOp(primitive, in_tensors, out_tensors, name) {}

  int InitParams() override;

  int BuildLayer() override;

 private:
  const schema::MaxPoolFusion *pooling_prim_ = nullptr;
};
}  // namespace mindspore
#endif  // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_MAX_POOLING_COREML_H_
@ -0,0 +1,48 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/runtime/delegate/coreml/op/reshape_coreml.h"
namespace mindspore {
int ReshapeCoreMLOp::IsSupport() {
  MS_CHECK_GE(in_tensors_.size(), kInputSize1, RET_NOT_SUPPORT);
  return RET_OK;
}

int ReshapeCoreMLOp::BuildLayer() {
  MS_ASSERT(op_ != nullptr);
  auto shape_tensor = in_tensors_.at(1);
  if (shape_tensor.IsConst()) {
    auto shape_dim = shape_tensor.ElementNum();
    auto shape_data = reinterpret_cast<const int *>(shape_tensor.Data().get());
    auto shape_param = op_->mutable_reshapestatic();
    for (int64_t i = 0; i < shape_dim; i++) {
      shape_param->add_targetshape(shape_data[i]);
    }
  } else {
    op_->mutable_reshapedynamic();
  }
  return RET_OK;
}
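
// With a constant shape the target is baked into the ReshapeStatic layer above, so SetMLOpInOut() below only wires
// the shape tensor as a second layer input in the dynamic case, where CoreML's ReshapeDynamic reads it at runtime.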

void ReshapeCoreMLOp::SetMLOpInOut() {
  MS_ASSERT(op_ != nullptr);
  op_->add_input(in_tensors_[0].Name());
  if (!in_tensors_[1].IsConst()) {
    op_->add_input(in_tensors_[1].Name());
  }
  op_->add_output(out_tensors_[0].Name());
}
}  // namespace mindspore
@ -0,0 +1,36 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_RESHAPE_COREML_H_
#define MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_RESHAPE_COREML_H_

#include <vector>
#include <string>
#include "src/runtime/delegate/coreml/op/coreml_op.h"
namespace mindspore {
class ReshapeCoreMLOp : public CoreMLOp {
 public:
  ReshapeCoreMLOp(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
                  const std::vector<mindspore::MSTensor> &out_tensors, std::string name)
      : CoreMLOp(primitive, in_tensors, out_tensors, name) {}

  int IsSupport() override;

  int BuildLayer() override;

  void SetMLOpInOut() override;
};
}  // namespace mindspore
#endif  // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_RESHAPE_COREML_H_
@ -0,0 +1,83 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/runtime/delegate/coreml/op/resize_coreml.h"
namespace mindspore {
int ResizeCoreMLOp::IsSupport() {
  resize_prim_ = op_primitive_->value_as_Resize();
  if (resize_prim_ == nullptr) {
    MS_LOG(ERROR) << "Get null primitive value for op: " << name_;
    return RET_ERROR;
  }
  auto resize_method = resize_prim_->method();
  if (resize_method != schema::ResizeMethod_LINEAR && resize_method != schema::ResizeMethod_NEAREST) {
    MS_LOG(WARNING) << "Unsupported resize method type: " << resize_method;
    return RET_NOT_SUPPORT;
  }
  if (resize_method != schema::ResizeMethod_LINEAR ||
      resize_prim_->coordinate_transform_mode() != schema::CoordinateTransformMode_ALIGN_CORNERS) {
    use_upsample_ = true;
    if (in_tensors_.size() != kInputSize1 || !in_tensors_[1].IsConst() || in_tensors_[1].ElementNum() != C2NUM) {
      MS_LOG(WARNING) << "The second input must be a constant with two scale values of height and width when using "
                         "the CoreML upsample layer for op: "
                      << name_;
      return RET_NOT_SUPPORT;
    }
  }
  return RET_OK;
}
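
// Lowering choice, sketched: an align-corners bilinear resize maps onto CoreML's ResizeBilinear layer with
// STRICT_ALIGN_ENDPOINTS_MODE, while every other supported variant (nearest, or bilinear without align_corners)
// goes through the Upsample layer, driven by the (height, width) scale values from input 1.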

int ResizeCoreMLOp::BuildLayer() {
  MS_ASSERT(op_ != nullptr);
  if (use_upsample_) {
    auto resize_param = op_->mutable_upsample();
    MS_CHECK_GE(in_tensors_.size(), kInputSize1, RET_NOT_SUPPORT);
    auto scale_tensor = in_tensors_.at(1);
    auto scale_data = scale_tensor.Data().get();
    if (scale_tensor.DataType() == DataType::kNumberTypeInt32) {
      resize_param->add_scalingfactor(static_cast<const int *>(scale_data)[0]);
      resize_param->add_scalingfactor(static_cast<const int *>(scale_data)[1]);
    } else if (scale_tensor.DataType() == DataType::kNumberTypeFloat32) {
      resize_param->add_fractionalscalingfactor(static_cast<const float *>(scale_data)[0]);
      resize_param->add_fractionalscalingfactor(static_cast<const float *>(scale_data)[1]);
    } else {
      MS_LOG(ERROR) << "Unsupported Resize scale data type: " << static_cast<int>(scale_tensor.DataType());
      return RET_ERROR;
    }
    if (resize_prim_->method() == schema::ResizeMethod_LINEAR) {
      resize_param->set_mode(CoreML::Specification::UpsampleLayerParams_InterpolationMode_BILINEAR);
      if (resize_prim_->coordinate_transform_mode() == schema::CoordinateTransformMode_ALIGN_CORNERS) {
        resize_param->set_linearupsamplemode(
          CoreML::Specification::UpsampleLayerParams_LinearUpsampleMode_ALIGN_CORNERS_TRUE);
      } else {
        resize_param->set_linearupsamplemode(
          CoreML::Specification::UpsampleLayerParams_LinearUpsampleMode_ALIGN_CORNERS_FALSE);
      }
    } else if (resize_prim_->method() == schema::ResizeMethod_NEAREST) {
      resize_param->set_mode(CoreML::Specification::UpsampleLayerParams_InterpolationMode_NN);
    }
    return RET_OK;
  }
  // Use the resize_bilinear op, which executes with the NCHW format.
  auto out_height = static_cast<int>(out_tensors_.at(0).Shape().at(kNCHW_H));
  auto out_width = static_cast<int>(out_tensors_.at(0).Shape().at(kNCHW_W));
  auto resize_param = op_->mutable_resizebilinear();
  resize_param->add_targetsize(out_height);
  resize_param->add_targetsize(out_width);
  resize_param->mutable_mode()->set_samplingmethod(CoreML::Specification::SamplingMode::STRICT_ALIGN_ENDPOINTS_MODE);
  return RET_OK;
}
}  // namespace mindspore
@ -0,0 +1,38 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_RESIZE_COREML_H_
#define MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_RESIZE_COREML_H_

#include <vector>
#include <string>
#include "src/runtime/delegate/coreml/op/coreml_op.h"
namespace mindspore {
class ResizeCoreMLOp : public CoreMLOp {
 public:
  ResizeCoreMLOp(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
                 const std::vector<mindspore::MSTensor> &out_tensors, std::string name)
      : CoreMLOp(primitive, in_tensors, out_tensors, name) {}

  int IsSupport() override;

  int BuildLayer() override;

 protected:
  const schema::Resize *resize_prim_ = nullptr;
  bool use_upsample_ = false;
};
}  // namespace mindspore
#endif  // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_RESIZE_COREML_H_
@ -0,0 +1,24 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/runtime/delegate/coreml/op/shape_coreml.h"
namespace mindspore {
int ShapeCoreMLOp::BuildLayer() {
  MS_ASSERT(op_ != nullptr);
  (void)op_->mutable_getshape();
  return RET_OK;
}
}  // namespace mindspore
@ -0,0 +1,32 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_SHAPE_COREML_H_
#define MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_SHAPE_COREML_H_

#include <vector>
#include <string>
#include "src/runtime/delegate/coreml/op/coreml_op.h"
namespace mindspore {
class ShapeCoreMLOp : public CoreMLOp {
 public:
  ShapeCoreMLOp(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
                const std::vector<mindspore::MSTensor> &out_tensors, std::string name)
      : CoreMLOp(primitive, in_tensors, out_tensors, name) {}

  int BuildLayer() override;
};
}  // namespace mindspore
#endif  // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_SHAPE_COREML_H_
@ -0,0 +1,41 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/runtime/delegate/coreml/op/softmax_coreml.h"
namespace mindspore {
int SoftmaxCoreMLOp::InitParams() {
  softmax_prim_ = op_primitive_->value_as_Softmax();
  if (softmax_prim_ == nullptr) {
    MS_LOG(ERROR) << "Get null primitive value for op: " << name_;
    return RET_ERROR;
  }
  MS_CHECK_TRUE_MSG(softmax_prim_->axis() != nullptr, RET_ERROR, "Softmax axis is null!");
  axis_ = static_cast<int>(*(softmax_prim_->axis()->begin()));
  return RET_OK;
}

int SoftmaxCoreMLOp::BuildLayer() {
  MS_ASSERT(op_ != nullptr && softmax_prim_ != nullptr);
  auto softmax_param = op_->mutable_softmaxnd();
  softmax_param->set_axis(axis_);
  return RET_OK;
}
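
// HandleAxis() below remaps the softmax axis from the NHWC layout of the Lite model to the NCHW layout that CoreML
// executes in. A sketch of the assumed mapping, with axis_ expected to be non-negative here (a negative axis would
// need to be normalized first, e.g. axis_ += 4, before indexing the perm array):
//   NCHW2NHWC_PERM = {0, 2, 3, 1}, so an NHWC channel axis of 3 becomes NCHW channel axis 1.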

int SoftmaxCoreMLOp::HandleAxis() {
  axis_ = NCHW2NHWC_PERM[axis_];
  return RET_OK;
}
}  // namespace mindspore
@ -0,0 +1,40 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_SOFTMAX_COREML_H_
#define MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_SOFTMAX_COREML_H_

#include <vector>
#include <string>
#include "src/runtime/delegate/coreml/op/coreml_op.h"
namespace mindspore {
class SoftmaxCoreMLOp : public CoreMLOp {
 public:
  SoftmaxCoreMLOp(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
                  const std::vector<mindspore::MSTensor> &out_tensors, std::string name)
      : CoreMLOp(primitive, in_tensors, out_tensors, name) {}

  int InitParams() override;

  int BuildLayer() override;

  int HandleAxis() override;

 private:
  const schema::Softmax *softmax_prim_ = nullptr;
  int axis_ = 0;
};
}  // namespace mindspore
#endif  // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_SOFTMAX_COREML_H_
@ -0,0 +1,37 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/runtime/delegate/coreml/op/transpose_coreml.h"
namespace mindspore {
int TransposeCoreMLOp::IsSupport() {
  MS_CHECK_GE(in_tensors_.size(), kInputSize1, RET_NOT_SUPPORT);
  auto perm_tensor = in_tensors_.at(1);
  if (!perm_tensor.IsConst()) {
    MS_LOG(WARNING) << "CoreML transpose must get fixed axis values.";
    return RET_NOT_SUPPORT;
  }
  return RET_OK;
}

int TransposeCoreMLOp::BuildLayer() {
  MS_ASSERT(op_ != nullptr);
  auto transpose_param = op_->mutable_transpose();
  for (auto perm : perm_) {
    transpose_param->add_axes(perm);
  }
  return RET_OK;
}
}  // namespace mindspore
@ -0,0 +1,54 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_TRANSPOSE_COREML_H_
#define MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_TRANSPOSE_COREML_H_

#include <vector>
#include <string>
#include "src/runtime/delegate/coreml/op/coreml_op.h"
namespace mindspore {
class TransposeCoreMLOp : public CoreMLOp {
 public:
  TransposeCoreMLOp(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
                    const std::vector<mindspore::MSTensor> &out_tensors, std::string name)
      : CoreMLOp(primitive, in_tensors, out_tensors, name) {
    MS_ASSERT(in_tensors.size() == kInputSize1);
    auto perm_tensor = in_tensors.at(1);
    auto perm_num = perm_tensor.ElementNum();
    auto perm_data = reinterpret_cast<const int *>(perm_tensor.Data().get());
    for (int64_t i = 0; i < perm_num; i++) {
      perm_.push_back(perm_data[i]);
    }
  }
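
  // Note: the constructor above assumes the perm tensor holds int32 data, as the reinterpret_cast implies; other
  // index types would need an explicit conversion before being read.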

  TransposeCoreMLOp(const std::vector<mindspore::MSTensor> &in_tensors,
                    const std::vector<mindspore::MSTensor> &out_tensors, std::vector<int> perm, std::string name)
      : CoreMLOp(nullptr, in_tensors, out_tensors, name) {
    perm_ = perm;
    type_ = schema::PrimitiveType_Transpose;
  }

  int IsSupport() override;

  int BuildLayer() override;

  std::vector<int> GetPerm() { return perm_; }

 protected:
  std::vector<int> perm_;
};
}  // namespace mindspore
#endif  // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_TRANSPOSE_COREML_H_
@ -0,0 +1,38 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/runtime/delegate/coreml/op/unsqueeze_coreml.h"
namespace mindspore {
int UnsqueezeCoreMLOp::InitParams() {
  unsqueeze_prim_ = op_primitive_->value_as_Unsqueeze();
  if (unsqueeze_prim_ == nullptr) {
    MS_LOG(ERROR) << "Get null primitive value for op: " << name_;
    return RET_ERROR;
  }
  return RET_OK;
}

int UnsqueezeCoreMLOp::BuildLayer() {
  MS_ASSERT(op_ != nullptr && unsqueeze_prim_ != nullptr);
  auto expanddims_param = op_->mutable_expanddims();
  MS_CHECK_TRUE_MSG(unsqueeze_prim_->axis() != nullptr, RET_ERROR, "Unsqueeze axis is null!");
  auto axes = std::vector<int>(unsqueeze_prim_->axis()->begin(), unsqueeze_prim_->axis()->end());
  for (auto axis : axes) {
    expanddims_param->add_axes(axis);
  }
  return RET_OK;
}
}  // namespace mindspore
@ -0,0 +1,37 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_UNSQUEEZE_COREML_H_
#define MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_UNSQUEEZE_COREML_H_

#include <vector>
#include <string>
#include "src/runtime/delegate/coreml/op/coreml_op.h"
namespace mindspore {
class UnsqueezeCoreMLOp : public CoreMLOp {
 public:
  UnsqueezeCoreMLOp(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
                    const std::vector<mindspore::MSTensor> &out_tensors, std::string name)
      : CoreMLOp(primitive, in_tensors, out_tensors, name) {}

  int InitParams() override;

  int BuildLayer() override;

 private:
  const schema::Unsqueeze *unsqueeze_prim_ = nullptr;
};
}  // namespace mindspore
#endif  // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_UNSQUEEZE_COREML_H_
@ -0,0 +1,37 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_PASS_COREML_BASE_PASS_H_
#define MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_PASS_COREML_BASE_PASS_H_
#include <string>
#include "src/runtime/delegate/coreml/coreml_graph.h"

namespace mindspore {
class CoreMLBasePass {
 public:
  virtual int Run(CoreMLGraph *subgraph) = 0;

  virtual ~CoreMLBasePass() = default;

  std::string name() { return name_; }

 protected:
  std::string name_;
  CoreMLGraph *subgraph_ = nullptr;
};
}  // namespace mindspore
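
// A minimal sketch of how a concrete pass plugs into this interface (names here are illustrative, not part of the
// delegate):
//   class MyDummyPass : public CoreMLBasePass {
//    public:
//     MyDummyPass() { name_ = "MyDummyPass"; }
//     int Run(CoreMLGraph *subgraph) override {
//       subgraph_ = subgraph;
//       return lite::RET_OK;  // walk subgraph->GetOps() and rewrite the graph here
//     }
//   };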

#endif  // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_PASS_COREML_BASE_PASS_H_
@ -0,0 +1,211 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "src/runtime/delegate/coreml/pass/coreml_format_trans_pass.h"
#include <vector>
#include "src/runtime/delegate/coreml/pass/coreml_pass_utils.h"

using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;

namespace mindspore {
std::set<mindspore::schema::PrimitiveType> nchw_nodes = {
  schema::PrimitiveType_Conv2DFusion, schema::PrimitiveType_Conv2dTransposeFusion, schema::PrimitiveType_Resize,
  schema::PrimitiveType_MaxPoolFusion, schema::PrimitiveType_AvgPoolFusion, schema::PrimitiveType_ScaleFusion,
  schema::PrimitiveType_CropAndResize, schema::PrimitiveType_InstanceNorm};

int CoreMLFormatTransPass::InsertPreNodes(CoreMLOp *op, std::vector<CoreMLOp *> *trans_ops) {
  bool is_input_op = op->in_ops().empty();
  // There is not always a single input (e.g. CropAndResize); we only care about the input with a 4D output.
  auto it = std::find_if(op->in_ops().begin(), op->in_ops().end(), [](CoreMLOp *k) {
    return k->outputs().size() > 0 && k->outputs()[0].Shape().size() == COMM_SHAPE_SIZE;
  });
  if (!is_input_op && it == op->in_ops().end()) {
    MS_LOG(ERROR) << "CoreML format transform pass did not find an input op with a 4D output";
    return RET_ERROR;
  }
  if (is_input_op || nchw_nodes.find((*it)->type()) == nchw_nodes.end()) {
    CoreMLOp *pre_op = nullptr;
    if (!is_input_op) {
      pre_op = *it;
    }

    // Create the pre transform op's out tensor.
    auto name = op->name() + "_pre_trans" + "_Nhwc2Nchw_" + std::to_string(total_++);
    auto nhwc_shape = op->inputs()[0].Shape();
    std::vector<int64_t> nchw_shape = {nhwc_shape[kNHWC_N], nhwc_shape[kNHWC_C], nhwc_shape[kNHWC_H],
                                       nhwc_shape[kNHWC_W]};
    auto tensor =
      mindspore::MSTensor::CreateTensor(name + "/output0", op->inputs()[0].DataType(), nchw_shape, nullptr, 0);
    if (tensor == nullptr) {
      MS_LOG(ERROR) << "New nchw tensor failed when inserting pre nhwc2nchw op.";
      return RET_ERROR;
    }
    tensor->SetFormat(Format::NCHW);
    std::vector<mindspore::MSTensor> pre_trans_outputs = {*tensor};
    all_tensors_->push_back(tensor);

    // Create the pre transform op: Nhwc2Nchw
    auto *trans_op = CoreMLPassUtils::CreateNhwc2NchwOp({op->inputs()[0]}, pre_trans_outputs, name);
    if (trans_op == nullptr) {
      MS_LOG(ERROR) << "Create Nhwc2Nchw transpose op failed.";
      return RET_ERROR;
    }
    trans_ops->push_back(trans_op);

    // Set in_ops, out_ops, inputs and outputs for the transform op
    std::vector<CoreMLOp *> pre_trans_in_ops;
    if (!is_input_op) {
      pre_trans_in_ops = {pre_op};
    }
    CoreMLPassUtils::UpdateOp(trans_op, pre_trans_in_ops, {op}, trans_op->inputs(), pre_trans_outputs);

    if (pre_op != nullptr) {
      CoreMLPassUtils::UpdateNH2NCTransNodePreOp(pre_op, trans_op, op);
    }
    CoreMLPassUtils::UpdateNH2NCTransNodePostOp(trans_op, op);
  }
  return RET_OK;
}

int CoreMLFormatTransPass::InsertPostNodes(CoreMLOp *op, std::vector<CoreMLOp *> *trans_ops) {
  bool is_output_op = false;
  if (op->out_ops().empty() ||
      find(subgraph_->outputs().begin(), subgraph_->outputs().end(), op->outputs()[0]) != subgraph_->outputs().end()) {
    is_output_op = true;
  }
  // Collect the post ops that need a trans op inserted.
  // A post op needs no insertion only when it is a CoreML op listed in nchw_nodes.
  std::vector<CoreMLOp *> post_insert_ops;
  std::vector<CoreMLOp *> post_non_insert_ops;
  for (size_t i = 0; i < op->out_ops().size(); i++) {
    auto post_op = op->out_ops()[i];
    if (nchw_nodes.find(post_op->type()) == nchw_nodes.end()) {
      post_insert_ops.push_back(post_op);
    } else {
      post_non_insert_ops.push_back(post_op);
    }
  }
  if (!is_output_op && post_insert_ops.empty()) {
    return RET_OK;
  }
  // Create the post transform op's in tensor.
  auto name = op->name() + "_post_trans" + "_Nchw2Nhwc_" + std::to_string(total_++);

  auto nhwc_shape = op->outputs()[0].Shape();
  std::vector<int64_t> nchw_shape = {nhwc_shape[kNHWC_N], nhwc_shape[kNHWC_C], nhwc_shape[kNHWC_H],
                                     nhwc_shape[kNHWC_W]};
  auto nc2nh_tensor =
    mindspore::MSTensor::CreateTensor(name + "/input0", op->outputs()[0].DataType(), nchw_shape, nullptr, 0);
  if (nc2nh_tensor == nullptr) {
    MS_LOG(ERROR) << "New nchw tensor failed when inserting post nchw2nhwc op.";
    return RET_ERROR;
  }
  nc2nh_tensor->SetFormat(Format::NCHW);
  all_tensors_->push_back(nc2nh_tensor);

  if (is_output_op) {
    std::vector<mindspore::MSTensor> nc2nh_outputs{op->outputs().at(0)};
    // Create the post transform op: Nchw2Nhwc
    auto *post_trans_op = CoreMLPassUtils::CreateNchw2NhwcOp({*nc2nh_tensor}, nc2nh_outputs, name);
    if (post_trans_op == nullptr) {
      MS_LOG(ERROR) << "Create Nchw2Nhwc transpose op failed.";
      return RET_ERROR;
    }
    // Set in_ops, out_ops, inputs and outputs for the transform op
    CoreMLPassUtils::UpdateOp(post_trans_op, {op}, {}, post_trans_op->inputs(), post_trans_op->outputs());
    trans_ops->push_back(post_trans_op);
  }
  // For each post op that needs insertion, create one transpose op and one out tensor,
  // all sharing the same in tensor.
  for (size_t i = 0; i < post_insert_ops.size(); ++i) {
    auto post_insert_op = post_insert_ops.at(i);
    // nc2nh op out tensor: abandon the original out tensor; all ops use the newly created out tensor.
    std::vector<mindspore::MSTensor> nc2nh_outputs{};
    auto origin_out_tensor = op->outputs().at(0);
    auto out_tensor_name = op->name() + "_post_trans" + "_Nchw2Nhwc_" + std::to_string(i) + "_out_tensor";
    auto out_tensor = mindspore::MSTensor::CreateTensor(out_tensor_name, origin_out_tensor.DataType(),
                                                        origin_out_tensor.Shape(), nullptr, 0);
    if (out_tensor == nullptr) {
      MS_LOG(ERROR) << "New nhwc tensor failed when inserting post nchw2nhwc op.";
      return RET_ERROR;
    }
    out_tensor->SetFormat(Format::NHWC);
    all_tensors_->push_back(out_tensor);
    nc2nh_outputs.push_back(*out_tensor);

    // Create the post transform op: Nchw2Nhwc
    auto *post_trans_op =
      CoreMLPassUtils::CreateNchw2NhwcOp({*nc2nh_tensor}, nc2nh_outputs, name + "_" + std::to_string(i));
    if (post_trans_op == nullptr) {
      MS_LOG(ERROR) << "Create Nchw2Nhwc transpose op failed.";
      return RET_ERROR;
    }
    // Set in_ops, out_ops, inputs and outputs for the transform op
    CoreMLPassUtils::UpdateOp(post_trans_op, {op}, {post_insert_op}, post_trans_op->inputs(),
                              post_trans_op->outputs());
    trans_ops->push_back(post_trans_op);
    // Update the post op's inputs and in_ops
    CoreMLPassUtils::UpdateNC2NHTransNodePostOp(op, post_trans_op, post_insert_op, origin_out_tensor);
  }
  // For the non-insert post ops, update their in tensor
  for (auto non_insert_op : post_non_insert_ops) {
    auto inputs = non_insert_op->inputs();
    std::replace(inputs.begin(), inputs.end(), op->outputs().at(0), *nc2nh_tensor);
    non_insert_op->set_inputs(inputs);
  }
  // Update the origin op's out tensor and out ops
  CoreMLPassUtils::UpdateNC2NHTransNodePreOp(op, *trans_ops, post_insert_ops);
  return RET_OK;
}
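
// The net effect of the two insertions above, sketched for a single NCHW-executing op (tensor formats in brackets;
// layer names are illustrative):
//   before: ... -> [NHWC] -> Conv2D -> [NHWC] -> ...
//   after:  ... -> [NHWC] -> Transpose(0,3,1,2) -> [NCHW] -> Conv2D -> [NCHW] -> Transpose(0,2,3,1) -> [NHWC] -> ...
// Consecutive NCHW ops skip the intermediate transposes, and the fusion pass later removes any redundant
// Nchw2Nhwc->Nhwc2Nchw pairs this pass leaves behind.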

int CoreMLFormatTransPass::Run(CoreMLGraph *subgraph) {
  subgraph_ = subgraph;
  all_ops_ = subgraph_->GetOps();
  all_tensors_ = subgraph_->GetInsertTensors();
  for (size_t i = 0; i < all_ops_->size();) {
    auto op = (*all_ops_)[i];
    if (nchw_nodes.find(op->type()) == nchw_nodes.end()) {
      i++;
      continue;
    }
    if (op->type() == schema::PrimitiveType_InstanceNorm && op->inputs().front().format() == mindspore::Format::NCHW) {
      i++;
      continue;
    }
    // Insert pre_ops before op in the vector, then advance the loop index by
    // (pre_ops.size() + 1) to reach the post_ops insert location.
    std::vector<CoreMLOp *> pre_ops;
    auto ret = InsertPreNodes(op, &pre_ops);
    if (ret != RET_OK) {
      MS_LOG(ERROR) << "Insert nhwc2nchw op before op " << op->name() << " failed.";
      return RET_ERROR;
    }
    all_ops_->insert(all_ops_->begin() + i, pre_ops.begin(), pre_ops.end());
    i += (pre_ops.size() + 1);

    // Insert post_ops after op in the vector, then advance the loop index by
    // post_ops.size() to reach the next op of the origin vector.
    std::vector<CoreMLOp *> post_ops;
    ret = InsertPostNodes(op, &post_ops);
    if (ret != RET_OK) {
      MS_LOG(ERROR) << "Insert nchw2nhwc op after op " << op->name() << " failed.";
      return RET_ERROR;
    }
    all_ops_->insert(all_ops_->begin() + i, post_ops.begin(), post_ops.end());
    i += post_ops.size();
  }
  return RET_OK;
}
}  // namespace mindspore
@ -0,0 +1,43 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_PASS_COREML_FORMAT_TRANS_PASS_H_
#define MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_PASS_COREML_FORMAT_TRANS_PASS_H_

#include <set>
#include <vector>
#include "src/runtime/delegate/coreml/op/coreml_op.h"
#include "src/runtime/delegate/coreml/pass/coreml_base_pass.h"

namespace mindspore {
class CoreMLFormatTransPass : public CoreMLBasePass {
 public:
  CoreMLFormatTransPass() { name_ = "CoreMLFormatTransPass"; }

  int Run(CoreMLGraph *subgraph) override;

 private:
  int InsertPreNodes(CoreMLOp *op, std::vector<CoreMLOp *> *trans_ops);

  int InsertPostNodes(CoreMLOp *op, std::vector<CoreMLOp *> *trans_ops);

 private:
  int total_ = 0;
  std::vector<CoreMLOp *> *all_ops_ = nullptr;
  std::vector<mindspore::MSTensor *> *all_tensors_ = nullptr;
};
}  // namespace mindspore
#endif  // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_PASS_COREML_FORMAT_TRANS_PASS_H_
@ -0,0 +1,384 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/runtime/delegate/coreml/pass/coreml_fusion_pass.h"
#include <vector>
#include "src/runtime/delegate/coreml/pass/coreml_pass_utils.h"

using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;

namespace mindspore {
bool CheckFusion(CoreMLOp *cur_op, const std::vector<mindspore::MSTensor> &graph_outputs) {
  if (cur_op->in_ops().empty() || cur_op->out_ops().empty()) {
    return false;
  }
  auto pre_flag = std::all_of(cur_op->in_ops().begin(), cur_op->in_ops().end(), [](CoreMLOp *in_op) {
    return CoreMLPassUtils::IsNchw2Nhwc(in_op) && in_op->out_ops().size() == 1;
  });
  if (!pre_flag) {
    return false;
  }
  auto post_flag = std::all_of(cur_op->out_ops().begin(), cur_op->out_ops().end(),
                               [](CoreMLOp *out_op) { return CoreMLPassUtils::IsNhwc2Nchw(out_op); });
  if (!post_flag) {
    return false;
  }
  for (auto out_op : cur_op->out_ops()) {
    // If the pattern is "nc2nh->cur_op->nh2nc" while the output tensors of "cur_op" and "nh2nc" are both graph
    // outputs, the trans ops cannot be fused, since fusing them would lose a graph output.
    if (out_op->out_ops().empty() &&
        std::find(graph_outputs.begin(), graph_outputs.end(), out_op->inputs().at(0)) != graph_outputs.end()) {
      return false;
    }
  }
  return true;
}

bool CheckFormatFusion(CoreMLOp *cur_op) {
  if (cur_op->out_ops().empty()) {
    return false;
  }
  if (CoreMLPassUtils::IsNhwc2Nchw(cur_op)) {
    return std::all_of(cur_op->out_ops().begin(), cur_op->out_ops().end(),
                       [](CoreMLOp *op) { return CoreMLPassUtils::IsNchw2Nhwc(op); });
  }
  if (CoreMLPassUtils::IsNchw2Nhwc(cur_op)) {
    return std::all_of(cur_op->out_ops().begin(), cur_op->out_ops().end(),
                       [](CoreMLOp *op) { return CoreMLPassUtils::IsNhwc2Nchw(op); });
  }
  return false;
}
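
// The two checks above target complementary patterns (sketch; nc2nh and nh2nc stand for the NCHW2NHWC and
// NHWC2NCHW transpose ops):
//   CheckFusion:       nc2nh -> cur_op -> nh2nc  => keep cur_op, drop both transposes and rewire cur_op to run
//                      directly in NCHW (CommonFusion).
//   CheckFormatFusion: nh2nc -> nc2nh (or the reverse)  => the pair cancels out and both ops can be removed
//                      entirely (FormatFusion).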

void CoreMLFusionPass::RemoveAndFreeOp(CoreMLOp *cur_op) {
  auto itr = find(all_ops_->begin(), all_ops_->end(), cur_op);
  if (itr != all_ops_->end()) {
    all_ops_->erase(itr);
  }
  delete cur_op;
}

int CoreMLFusionPass::UpdatePreOps(CoreMLOp *cur_op) {
  auto cur_in_ops = cur_op->in_ops();
  for (auto in_op : cur_op->in_ops()) {
    // graph input op
    if (in_op->in_ops().empty()) {
      cur_in_ops.erase(find(cur_in_ops.begin(), cur_in_ops.end(), in_op));
    } else {
      auto pre_op = in_op->in_ops()[0];
      auto pre_out_ops = pre_op->out_ops();
      for (size_t i = 0; i < pre_out_ops.size(); i++) {
        if (pre_out_ops[i] == in_op) {
          pre_out_ops[i] = cur_op;
          break;
        }
      }
      pre_op->set_out_ops(pre_out_ops);

      for (size_t i = 0; i < cur_in_ops.size(); i++) {
        if (cur_in_ops[i] == in_op) {
          cur_in_ops[i] = pre_op;
          break;
        }
      }
    }
    RemoveAndFreeOp(in_op);
  }
  cur_op->set_in_ops(cur_in_ops);
  return RET_OK;
}

int CoreMLFusionPass::UpdatePostOps(CoreMLOp *cur_op) {
  auto cur_out_ops = cur_op->out_ops();
  for (auto out_op : cur_op->out_ops()) {
    // graph output op
    if (out_op->out_ops().empty()) {
      cur_out_ops.erase(find(cur_out_ops.begin(), cur_out_ops.end(), out_op));
    } else {
      auto post_op = out_op->out_ops()[0];
      auto post_in_ops = post_op->in_ops();
      for (size_t i = 0; i < post_in_ops.size(); i++) {
        if (post_in_ops[i] == out_op) {
          post_in_ops[i] = cur_op;
          break;
        }
      }
      post_op->set_in_ops(post_in_ops);

      for (size_t i = 0; i < cur_out_ops.size(); i++) {
        if (cur_out_ops[i] == out_op) {
          cur_out_ops[i] = post_op;
          break;
        }
      }
    }
    RemoveAndFreeOp(out_op);
  }
  cur_op->set_out_ops(cur_out_ops);
  return RET_OK;
}

int UpdatePreTensors(CoreMLOp *cur_op) {
  auto in_tensors_vec = cur_op->inputs();
  for (auto in_op : cur_op->in_ops()) {
    if (in_op->inputs().empty() || in_op->outputs().empty()) {
      MS_LOG(ERROR) << "in_tensors or out_tensors of the input op is empty.";
      return RET_ERROR;
    }
    mindspore::MSTensor cur_tensor;
    auto in_tensor = in_op->inputs()[0];
    auto out_tensor = in_op->outputs()[0];
    if (!in_op->in_ops().empty()) {
      auto pre_op = in_op->in_ops()[0];
      for (size_t i = 0; i < pre_op->outputs().size(); i++) {
        if (pre_op->outputs()[i] == in_tensor) {
          cur_tensor = pre_op->outputs()[i];
          break;
        }
      }
    } else {
      // graph input
      cur_tensor = in_tensor;
    }

    for (size_t i = 0; i < in_tensors_vec.size(); i++) {
      if (in_tensors_vec[i] == out_tensor) {
        in_tensors_vec[i] = cur_tensor;
      }
    }
  }
  cur_op->set_inputs(in_tensors_vec);
  return RET_OK;
}

int UpdatePostTensors(CoreMLOp *cur_op) {
  for (auto out_op : cur_op->out_ops()) {
    auto in_tensor = out_op->inputs()[0];
    auto out_tensor = out_op->outputs()[0];
    auto nhwc_shape = in_tensor.Shape();
    if (in_tensor.format() == Format::NHWC) {
      MS_CHECK_TRUE_MSG(nhwc_shape.size() == COMM_SHAPE_SIZE, RET_ERROR, "Invalid transpose dim size!");
      in_tensor.SetShape({nhwc_shape[kNHWC_N], nhwc_shape[kNHWC_C], nhwc_shape[kNHWC_H], nhwc_shape[kNHWC_W]});
      in_tensor.SetFormat(Format::NCHW);
    }
    // out_op is a graph output op
    if (out_op->out_ops().empty()) {
      auto out_tensors_vec = cur_op->outputs();
      for (size_t i = 0; i < out_tensors_vec.size(); i++) {
        if (out_tensors_vec[i] == in_tensor) {
          out_tensors_vec[i] = out_op->outputs()[0];
        }
      }
      cur_op->set_outputs(out_tensors_vec);
      // Other out_ops may use the same tensor as the current out_op. Note that such an out_op has likely been
      // updated already, which means it may no longer be a Transpose op.
      for (auto other_out_op : cur_op->out_ops()) {
        auto other_in_tensors_vec = other_out_op->inputs();
        for (size_t i = 0; i < other_in_tensors_vec.size(); i++) {
          if (other_in_tensors_vec[i] == in_tensor) {
            other_in_tensors_vec[i] = out_op->outputs()[0];
          }
        }
        other_out_op->set_inputs(other_in_tensors_vec);
      }
    }
    // out_op is not a graph output op
    for (auto post_op : out_op->out_ops()) {
      auto in_tensors_vec = post_op->inputs();
      for (size_t i = 0; i < in_tensors_vec.size(); i++) {
        if (in_tensors_vec[i] == out_tensor) {
          in_tensors_vec[i] = in_tensor;
        }
      }
      post_op->set_inputs(in_tensors_vec);
    }
  }
  return RET_OK;
}

int CoreMLFusionPass::UpdateOp(CoreMLOp *cur_op) {
  if (cur_op == nullptr) {
    MS_LOG(ERROR) << "op is nullptr.";
    return RET_ERROR;
  }
  auto ret = UpdatePreTensors(cur_op);
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "UpdatePreTensors failed.";
    return RET_ERROR;
  }
  ret = UpdatePostTensors(cur_op);
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "UpdatePostTensors failed.";
    return RET_ERROR;
  }
  ret = UpdatePreOps(cur_op);
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "UpdatePreOps failed.";
    return RET_ERROR;
  }
  ret = UpdatePostOps(cur_op);
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "UpdatePostOps failed.";
    return RET_ERROR;
  }
  return RET_OK;
}

int CoreMLFusionPass::CommonFusion(CoreMLOp *cur_op) {
  if (cur_op == nullptr) {
    return RET_ERROR;
  }
  auto ret = UpdateOp(cur_op);
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "UpdateOp failed.";
    return RET_ERROR;
  }
  ret = cur_op->HandleAxis();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "HandleAxis failed.";
    return ret;
  }
  return RET_OK;
}

void UpdateOutOpsOfPreOp(CoreMLOp *cur_op, bool found_graph_out_tensor, const mindspore::MSTensor &graph_out_tensor,
                         const std::vector<CoreMLOp *> &pre_insert_ops) {
  MS_ASSERT(cur_op != nullptr);
  auto is_graph_input = cur_op->in_ops().empty();
  auto cur_op_in_tensor = cur_op->inputs()[0];
  if (!is_graph_input) {
    auto pre_op = cur_op->in_ops()[0];
    auto pre_out_ops = pre_op->out_ops();
    size_t cur_op_index = 0;
    for (size_t index = 0; index < pre_out_ops.size(); index++) {
      if (pre_out_ops[index] == cur_op) {
        pre_out_ops.erase(pre_out_ops.begin() + index);
        cur_op_index = index;
        index--;
      } else if (found_graph_out_tensor) {
        // Only in this case is the output of pre_op redirected to the second trans op's output, so pre_out_ops
        // needs updating.
        auto tensors_vec = pre_out_ops[index]->inputs();
        for (size_t i = 0; i < tensors_vec.size(); i++) {
          if (tensors_vec[i] == cur_op_in_tensor) {
            tensors_vec[i] = graph_out_tensor;
            break;
          }
        }
        pre_out_ops[index]->set_inputs(tensors_vec);
      }
    }
    pre_out_ops.insert(pre_out_ops.begin() + cur_op_index, pre_insert_ops.begin(), pre_insert_ops.end());
    pre_op->set_out_ops(pre_out_ops);
  }
  return;
}

int CoreMLFusionPass::FormatFusion(CoreMLOp *cur_op) {
  CHECK_NULL_RETURN(cur_op);
  auto is_graph_input = cur_op->in_ops().empty();
  auto cur_op_in_tensor = cur_op->inputs()[0];
  std::vector<CoreMLOp *> pre_insert_ops;
  CoreMLOp *pre_op = nullptr;
  if (!is_graph_input) {
    pre_op = cur_op->in_ops()[0];
  }
  mindspore::MSTensor graph_out_tensor;
  bool found_graph_out_tensor = false;
  auto graph_outputs = subgraph_->outputs();
  // If the output of the second trans op(s) is a graph output, find it and use it as the pre op's output.
  for (const auto &sec_op : cur_op->out_ops()) {
    if (std::find(graph_outputs.begin(), graph_outputs.end(), sec_op->outputs()[0]) != graph_outputs.end()) {
      graph_out_tensor = sec_op->outputs()[0];
      if (!is_graph_input) {
        found_graph_out_tensor = true;
        // cur_op is the first trans op; its input op count and input tensor count must be 1
        pre_op->set_outputs({graph_out_tensor});
        // In fp16 mode, the fp16 tensor data type needs to be changed back.
        auto tensor = pre_op->outputs()[0];
        if (tensor.DataType() == DataType::kNumberTypeFloat16) {
          tensor.SetDataType(DataType::kNumberTypeFloat32);
        }
        break;
      } else {
        MS_LOG(WARNING) << "A graph output that is equivalent to a graph input is unsupported now.";
        return RET_OK;
      }
    }
  }
  for (const auto &trans_op : cur_op->out_ops()) {
    for (const auto &post_op : trans_op->out_ops()) {
      // update tensor
      auto tensors_vec = post_op->inputs();
      for (size_t i = 0; i < tensors_vec.size(); i++) {
        if (tensors_vec[i] == trans_op->outputs()[0]) {
          tensors_vec[i] = found_graph_out_tensor ? graph_out_tensor : cur_op_in_tensor;
          break;
        }
      }
      post_op->set_inputs(tensors_vec);

      // update op
      auto post_in_ops = post_op->in_ops();
      for (size_t i = 0; i < post_in_ops.size(); i++) {
        if (post_in_ops[i] == trans_op) {
          if (is_graph_input) {
            post_in_ops.erase(post_in_ops.begin() + i);
          } else {
            post_in_ops[i] = pre_op;
          }
          break;
        }
      }
      post_op->set_in_ops(post_in_ops);
      pre_insert_ops.push_back(post_op);
    }
    RemoveAndFreeOp(trans_op);
  }
  UpdateOutOpsOfPreOp(cur_op, found_graph_out_tensor, graph_out_tensor, pre_insert_ops);
  RemoveAndFreeOp(cur_op);
  return RET_OK;
}

int CoreMLFusionPass::Run(CoreMLGraph *subgraph) {
  subgraph_ = subgraph;
  all_ops_ = subgraph->GetOps();
  for (size_t i = 0; i < all_ops_->size(); i++) {
    auto cur_op = (*all_ops_)[i];
    auto ret = RET_OK;
    if (CheckFusion(cur_op, subgraph->outputs())) {
      i -= cur_op->in_ops().size();
      ret = CommonFusion(cur_op);
    }
    if (ret != RET_OK) {
      MS_LOG(ERROR) << "Fusion failed.";
      return RET_ERROR;
    }
  }
  for (size_t i = 0; i < all_ops_->size(); ++i) {
    auto cur_op = (*all_ops_)[i];
    if (CheckFormatFusion(cur_op)) {
      i--;
      auto ret = FormatFusion(cur_op);
      if (ret != RET_OK) {
        MS_LOG(ERROR) << "FormatFusion failed.";
        return RET_ERROR;
      }
    }
  }
  return RET_OK;
}
}  // namespace mindspore
@ -0,0 +1,42 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_PASS_COREML_FUSION_PASS_H_
#define MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_PASS_COREML_FUSION_PASS_H_
#include <vector>
#include "src/runtime/delegate/coreml/op/coreml_op.h"
#include "src/runtime/delegate/coreml/pass/coreml_base_pass.h"

namespace mindspore {
class CoreMLFusionPass : public CoreMLBasePass {
 public:
  CoreMLFusionPass() { name_ = "CoreMLFusionPass"; }

  int Run(CoreMLGraph *subgraph) override;

 protected:
  int UpdatePreOps(CoreMLOp *cur_op);
  int UpdatePostOps(CoreMLOp *cur_op);
  void RemoveAndFreeOp(CoreMLOp *cur_op);
  int UpdateOp(CoreMLOp *cur_op);
  int CommonFusion(CoreMLOp *cur_op);
  int FormatFusion(CoreMLOp *cur_op);

 private:
  std::vector<CoreMLOp *> *all_ops_ = nullptr;
};
}  // namespace mindspore
#endif  // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_PASS_COREML_FUSION_PASS_H_
@ -0,0 +1,44 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/runtime/delegate/coreml/pass/coreml_pass_manager.h"
#include "include/errorcode.h"
#include "src/common/log_adapter.h"

using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;

namespace mindspore {
void CoreMLPassManager::AddPass(CoreMLBasePass *pass) { all_pass_.push_back(pass); }

int CoreMLPassManager::RunPass(CoreMLGraph *subgraph) {
  for (auto pass : all_pass_) {
    auto ret = pass->Run(subgraph);
    if (ret != RET_OK) {
      MS_LOG(ERROR) << "CoreML pass run failed. Pass name is: " << pass->name() << " for subgraph "
                    << subgraph->name();
      return ret;
    }
  }
  return RET_OK;
}

void CoreMLPassManager::Clear() {
  for (auto pass : all_pass_) {
    delete pass;
  }
  all_pass_.clear();
}
}  // namespace mindspore
@ -0,0 +1,41 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_PASS_COREML_PASS_MANAGER_H_
#define MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_PASS_COREML_PASS_MANAGER_H_
#include <vector>
#include "src/runtime/delegate/coreml/pass/coreml_base_pass.h"
namespace mindspore {
class CoreMLPassManager {
 public:
  static CoreMLPassManager *GetInstance() {
    static CoreMLPassManager pass_manager;
    return &pass_manager;
  }

  ~CoreMLPassManager() { Clear(); }

  void AddPass(CoreMLBasePass *pass);

  int RunPass(CoreMLGraph *subgraph);

  void Clear();

 private:
  std::vector<CoreMLBasePass *> all_pass_{};
};
}  // namespace mindspore
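
// Typical wiring, as a hedged sketch (the exact call site lives in the delegate setup code, which is not shown in
// this diff). Note that the manager takes ownership of the passes: Clear() deletes them.
//   auto *pm = CoreMLPassManager::GetInstance();
//   pm->AddPass(new (std::nothrow) CoreMLFormatTransPass());
//   pm->AddPass(new (std::nothrow) CoreMLFusionPass());
//   int ret = pm->RunPass(graph);  // passes run in insertion order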
#endif  // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_PASS_COREML_PASS_MANAGER_H_
@@ -0,0 +1,178 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/runtime/delegate/coreml/pass/coreml_pass_utils.h"
#include <algorithm>
#include "src/runtime/delegate/coreml/op/transpose_coreml.h"

namespace mindspore {
CoreMLOp *CoreMLPassUtils::CreateNchw2NhwcOp(const std::vector<mindspore::MSTensor> &in_tensors,
                                             const std::vector<mindspore::MSTensor> &out_tensors,
                                             const std::string &name) {
  auto trans_op = new (std::nothrow) TransposeCoreMLOp(in_tensors, out_tensors, NCHW2NHWC_PERM, name);
  if (trans_op == nullptr) {
    MS_LOG(ERROR) << "New Nchw2Nhwc CoreMLOp failed.";
    return nullptr;
  }
  auto ret = trans_op->Init();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Nchw2Nhwc transpose op init failed.";
    delete trans_op;  // avoid leaking the op when init fails
    return nullptr;
  }
  return trans_op;
}

CoreMLOp *CoreMLPassUtils::CreateNhwc2NchwOp(const std::vector<mindspore::MSTensor> &in_tensors,
                                             const std::vector<mindspore::MSTensor> &out_tensors,
                                             const std::string &name) {
  auto trans_op = new (std::nothrow) TransposeCoreMLOp(in_tensors, out_tensors, NHWC2NCHW_PERM, name);
  if (trans_op == nullptr) {
    MS_LOG(ERROR) << "New Nhwc2Nchw CoreMLOp failed.";
    return nullptr;
  }
  auto ret = trans_op->Init();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Nhwc2Nchw transpose op init failed.";
    delete trans_op;  // avoid leaking the op when init fails
    return nullptr;
  }
  return trans_op;
}

void CoreMLPassUtils::UpdateOp(CoreMLOp *op, const std::vector<CoreMLOp *> &in_ops,
                               const std::vector<CoreMLOp *> &out_ops,
                               const std::vector<mindspore::MSTensor> &in_tensors,
                               const std::vector<mindspore::MSTensor> &outputs) {
  op->set_inputs(in_tensors);
  op->set_outputs(outputs);
  op->set_in_ops(in_ops);
  op->set_out_ops(out_ops);
}

void CoreMLPassUtils::UpdateNH2NCTransNodePreOp(CoreMLOp *pre_op, CoreMLOp *trans_op, CoreMLOp *op) {
  // For the op before trans, update out_ops; the output tensor of op is the input tensor of trans, so it needs no
  // update.
  std::vector<CoreMLOp *> out_ops = pre_op->out_ops();
  if (op == nullptr) {
    out_ops.emplace_back(trans_op);
  } else {
    for (size_t i = 0; i < out_ops.size(); i++) {
      if (out_ops[i] == op) {
        out_ops[i] = trans_op;
        break;
      }
    }
  }
  pre_op->set_out_ops(out_ops);
}

void CoreMLPassUtils::UpdateNC2NHTransNodePreOp(CoreMLOp *pre_op, const std::vector<CoreMLOp *> &trans_ops,
                                                const std::vector<CoreMLOp *> &ops) {
  // The op before trans may have multiple outputs.
  auto cur_out_ops = pre_op->out_ops();
  for (size_t i = 0; i < ops.size(); i++) {
    auto itr = find(cur_out_ops.begin(), cur_out_ops.end(), ops[i]);
    if (itr != cur_out_ops.end()) {
      cur_out_ops.erase(itr);
    }
  }
  std::copy(trans_ops.begin(), trans_ops.end(), std::back_inserter(cur_out_ops));
  pre_op->set_out_ops(cur_out_ops);
  // The output tensor of the op before trans is used as the output tensor of trans, so replace the output tensor
  // with the input tensor of trans.
  pre_op->set_outputs({trans_ops.at(0)->inputs().at(0)});
}

void CoreMLPassUtils::UpdateNH2NCTransNodePostOp(CoreMLOp *trans_op, CoreMLOp *post_op) {
  auto cur_in_tensors = post_op->inputs();
  cur_in_tensors[0] = trans_op->outputs()[0];
  post_op->set_inputs(cur_in_tensors);
  post_op->set_in_ops({trans_op});
}

void CoreMLPassUtils::UpdateNC2NHTransNodePostOp(CoreMLOp *op, CoreMLOp *trans_op, CoreMLOp *post_op,
                                                 const mindspore::MSTensor &org_in_tensor) {
  // The input tensor should be replaced with the output tensor of trans_op.
  auto post_in_tensors = post_op->inputs();
  std::replace(post_in_tensors.begin(), post_in_tensors.end(), org_in_tensor, trans_op->outputs().at(0));
  post_op->set_inputs(post_in_tensors);

  // For post_op after trans, op in in_ops should be replaced with trans_op.
  auto post_in_ops = post_op->in_ops();
  if (op == nullptr) {
    post_in_ops.push_back(trans_op);
  } else {
    std::replace(post_in_ops.begin(), post_in_ops.end(), op, trans_op);
  }
  post_op->set_in_ops(post_in_ops);
}

bool CoreMLPassUtils::IsNhwc2Nchw(CoreMLOp *op) {
  if (op == nullptr) {
    return false;
  }
  if (op->type() != schema::PrimitiveType_Transpose) {
    return false;
  }
  auto transpose_op = static_cast<TransposeCoreMLOp *>(op);
  std::vector<int> perm = transpose_op->GetPerm();
  std::vector<int> nh2nc_perm = {0, 3, 1, 2};
  return perm == nh2nc_perm;
}

bool CoreMLPassUtils::IsNchw2Nhwc(CoreMLOp *op) {
  if (op == nullptr) {
    return false;
  }
  if (op->type() != schema::PrimitiveType_Transpose) {
    return false;
  }
  auto transpose_op = static_cast<TransposeCoreMLOp *>(op);
  std::vector<int> perm = transpose_op->GetPerm();
  std::vector<int> nc2nh_perm = {0, 2, 3, 1};
  return perm == nc2nh_perm;
}

CoreMLOp *CoreMLPassUtils::OpInputFromOp(CoreMLOp *op, mindspore::MSTensor in_tensor) {
  // Given an op and one of its input tensors, find which of its in_ops produced that tensor.
  // If the input tensor is a graph input, return nullptr.
  if (op == nullptr) {
    return nullptr;
  }
  auto in_ops = op->in_ops();
  auto output_contain = [in_tensor](CoreMLOp *in_op) {
    auto outputs = in_op->outputs();
    return std::find(outputs.begin(), outputs.end(), in_tensor) != outputs.end();
  };
  auto it = std::find_if(in_ops.begin(), in_ops.end(), output_contain);
  if (it == in_ops.end()) {
    return nullptr;
  }
  return *it;
}

std::vector<mindspore::MSTensor> CoreMLPassUtils::GetNonConstInputs(CoreMLOp *op) {
  MS_CHECK_TRUE_MSG(op != nullptr, {}, "Input op is null!");
  std::vector<mindspore::MSTensor> non_const_in_tensors;
  std::copy_if(op->inputs().begin(), op->inputs().end(), std::back_inserter(non_const_in_tensors),
               [](const auto &tensor) { return !tensor.IsConst(); });
  return non_const_in_tensors;
}
}  // namespace mindspore
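A standalone sanity check of the permutation convention these helpers build around (the perm literals match those compared in `IsNhwc2Nchw`/`IsNchw2Nhwc` above; the round-trip mirrors the nh2nc + nc2nh pairs the extend pass inserts):

```cpp
#include <cassert>
#include <vector>

int main() {
  const std::vector<int> nh2nc_perm = {0, 3, 1, 2};  // NHWC -> NCHW, as in IsNhwc2Nchw
  const std::vector<int> nc2nh_perm = {0, 2, 3, 1};  // NCHW -> NHWC, as in IsNchw2Nhwc
  // Applying one perm and then the other is the identity on axis indices:
  std::vector<int> axes = {0, 1, 2, 3}, tmp(4), back(4);
  for (int i = 0; i < 4; ++i) tmp[i] = axes[nh2nc_perm[i]];   // NHWC axes -> NCHW order
  for (int i = 0; i < 4; ++i) back[i] = tmp[nc2nh_perm[i]];   // and back again
  assert(back == axes);  // an inserted nh2nc/nc2nh pair cancels out, which is what fusion exploits
  return 0;
}
```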
@@ -0,0 +1,56 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_PASS_COREML_PASS_UTILS_H_
#define MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_PASS_COREML_PASS_UTILS_H_
#include <vector>
#include <set>
#include <string>
#include <unordered_map>
#include "src/runtime/delegate/coreml/op/coreml_op.h"
#include "src/runtime/delegate/coreml/op/transpose_coreml.h"

namespace mindspore {
class CoreMLPassUtils {
 public:
  static CoreMLOp *CreateNchw2NhwcOp(const std::vector<mindspore::MSTensor> &in_tensors,
                                     const std::vector<mindspore::MSTensor> &out_tensors, const std::string &name);

  static CoreMLOp *CreateNhwc2NchwOp(const std::vector<mindspore::MSTensor> &in_tensors,
                                     const std::vector<mindspore::MSTensor> &out_tensors, const std::string &name);

  static void UpdateOp(CoreMLOp *op, const std::vector<CoreMLOp *> &in_ops, const std::vector<CoreMLOp *> &out_ops,
                       const std::vector<mindspore::MSTensor> &in_tensors,
                       const std::vector<mindspore::MSTensor> &out_tensors);

  static void UpdateNH2NCTransNodePreOp(CoreMLOp *pre_op, CoreMLOp *trans_op, CoreMLOp *op);

  static void UpdateNC2NHTransNodePreOp(CoreMLOp *pre_op, const std::vector<CoreMLOp *> &trans_ops,
                                        const std::vector<CoreMLOp *> &ops);

  static void UpdateNH2NCTransNodePostOp(CoreMLOp *trans_op, CoreMLOp *post_op);

  static void UpdateNC2NHTransNodePostOp(CoreMLOp *op, CoreMLOp *trans_op, CoreMLOp *post_op,
                                         const mindspore::MSTensor &org_in_tensor);

  static bool IsNhwc2Nchw(CoreMLOp *op);

  static bool IsNchw2Nhwc(CoreMLOp *op);
  static CoreMLOp *OpInputFromOp(CoreMLOp *op, mindspore::MSTensor in_tensor);
  static std::vector<mindspore::MSTensor> GetNonConstInputs(CoreMLOp *op);
};
}  // namespace mindspore
#endif  // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_PASS_COREML_PASS_UTILS_H_
@@ -0,0 +1,316 @@
/**
 * Copyright 2020-2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "src/runtime/delegate/coreml/pass/coreml_trans_extend_pass.h"
#include <algorithm>
#include <set>
#include <string>
#include "src/runtime/delegate/coreml/pass/coreml_pass_utils.h"

using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;

namespace mindspore {
std::set<mindspore::schema::PrimitiveType> format_depend_nodes = {
  schema::PrimitiveType_Conv2DFusion,  schema::PrimitiveType_Conv2dTransposeFusion,
  schema::PrimitiveType_MaxPoolFusion, schema::PrimitiveType_AvgPoolFusion,
  schema::PrimitiveType_CropAndResize, schema::PrimitiveType_InstanceNorm,
  schema::PrimitiveType_ArgMaxFusion,  schema::PrimitiveType_FullConnection,
  schema::PrimitiveType_ScaleFusion,   schema::PrimitiveType_ExpandDims,
  schema::PrimitiveType_Unsqueeze,     schema::PrimitiveType_SliceFusion,
  schema::PrimitiveType_BroadcastTo,   schema::PrimitiveType_TileFusion,
  schema::PrimitiveType_Resize,        schema::PrimitiveType_MatMulFusion,
  schema::PrimitiveType_Gather,        schema::PrimitiveType_Squeeze,
  schema::PrimitiveType_Reshape,       schema::PrimitiveType_Transpose,
};

// The goal of this pass is to minimize the number of subgraphs generated, by inserting nchw2nhwc or nhwc2nchw
// before or after format-agnostic operators (e.g. concat, add, etc.) and then relying on the fusion pass.
// If more than half of an op's inputs and outputs already carry transposes, we wrap the remaining inputs and
// outputs with transposes too, hoping the fusion pass can then eliminate them all. Otherwise, we insert nothing.

// Typically concat accepts an output of nchw2nhwc; we wrap the other inputs with nh2nc + nc2nh pairs so that all
// inputs to concat have the same format, and then the fusion pass removes all nchw2nhwc ops.
// e.g.
// original:        (conv -> nchw2nhwc, add (format nhwc)) -> concat -> (nhwc2nchw -> conv)
// after this pass: (conv -> nchw2nhwc, add -> nhwc2nchw -> nchw2nhwc) -> concat -> (nhwc2nchw -> conv)
// after fusion:    (conv, add -> nhwc2nchw) -> concat -> conv
// Originally 2 CPU subgraphs; after the 2 passes, only 1 CPU subgraph remains.

// Such ops require all inputs to have the same format, which could be nchw, nhwc or any other format.
// Their inputs and outputs may not be 4-d, or may already be format-consistent,
// so we insert no nc2nh or nh2nc when the op's in_ops and out_ops contain no nc2nh or nh2nc.
// This pass should be run after npu_transform_pass, which inserts transposes for nchw-input-limited ops like conv2d.

InsertState CoreMLTransExtendPass::GetInsertState(CoreMLOp *op) {
  // Filter out irrelevant ops whose behavior depends on the data format.
  if (format_depend_nodes.find(op->type()) != format_depend_nodes.end()) {
    return InsertState::InsertNone;
  }
  // The current op is a target op.
  // Use out_ops to count the out lines from the current op, since a single tensor can be used by multiple out ops.
  // Besides, a tensor can be used by out ops and as a graph output at the same time; there is one more line in
  // that case.
  std::vector<mindspore::MSTensor> inputs = CoreMLPassUtils::GetNonConstInputs(op);
  size_t in_out_tensor_num =
    inputs.size() + std::max(std::max(op->out_ops().size(), static_cast<size_t>(1)), op->outputs().size());
  size_t transpose_input_num = 0;
  size_t transpose_output_num = 0;
  size_t graph_input_num = 0;
  size_t graph_output_num = 0;
  bool need_pre_insert = false;
  bool need_post_insert = false;
  // Count the input tensors coming from nc2nh ops and the output tensors going to nh2nc ops.
  for (size_t i = 0; i < inputs.size(); ++i) {
    auto in_op = CoreMLPassUtils::OpInputFromOp(op, inputs.at(i));
    if (CoreMLPassUtils::IsNchw2Nhwc(in_op)) {
      transpose_input_num++;
    } else {
      need_pre_insert = true;
    }
    if (in_op == nullptr) {
      graph_input_num++;
    }
  }
  auto graph_output = subgraph_->outputs();
  for (auto output : op->outputs()) {
    if (std::find(graph_output.begin(), graph_output.end(), output) != graph_output.end()) {
      graph_output_num++;
      need_post_insert = true;
    }
  }
  for (const auto out_op : op->out_ops()) {
    for (auto out_op_input : out_op->inputs()) {
      if (std::find(graph_output.begin(), graph_output.end(), out_op_input) != graph_output.end()) {
        in_out_tensor_num++;
      }
    }
    if (CoreMLPassUtils::IsNhwc2Nchw(out_op)) {
      transpose_output_num++;
    } else {
      need_post_insert = true;
    }
  }

  // Insert nothing if the number of transpose tensors is smaller than half of the total op inputs and outputs,
  // unless the current op is a graph input or output op, since we should avoid building a single-op subgraph in
  // that case. Also insert nothing if all inputs and outputs are transpose tensors; the fusion pass handles that.
  size_t transpose_tensor_num = transpose_input_num + transpose_output_num;
  size_t connected_in_out_tensor_num = in_out_tensor_num - graph_output_num - graph_input_num;
  if (transpose_tensor_num == 0 || transpose_tensor_num * REPEAT_TIMES2 < connected_in_out_tensor_num ||
      transpose_tensor_num == in_out_tensor_num) {
    return InsertState::InsertNone;
  }
  InsertState ret = (need_pre_insert && need_post_insert)
                      ? InsertState::BothInsert
                      : (need_pre_insert ? InsertState::PreInsert
                                         : (need_post_insert ? InsertState::PostInsert : InsertState::InsertNone));

  return ret;
}

int CoreMLTransExtendPass::InsertTransNode(CoreMLOp *op, CoreMLOp *post_op, const mindspore::MSTensor &trans_in_tensor,
                                           std::vector<CoreMLOp *> *trans_ops) {
  MS_ASSERT(op != nullptr || post_op != nullptr);
  std::string op_name;
  std::vector<CoreMLOp *> in_ops;
  std::vector<CoreMLOp *> out_ops;
  if (op != nullptr) {
    op_name = op->name() + "_post";
    in_ops.emplace_back(op);
  }
  if (post_op != nullptr) {
    op_name = post_op->name() + "_pre";
    out_ops.emplace_back(post_op);
  }
  auto nhwc_shape = trans_in_tensor.Shape();
  std::vector<int64_t> nchw_shape = {nhwc_shape[kNHWC_N], nhwc_shape[kNHWC_C], nhwc_shape[kNHWC_H],
                                     nhwc_shape[kNHWC_W]};

  auto nh2nc_name = op_name + "_nh2nc_" + std::to_string(total++);
  auto nh2nc_tensor =
    mindspore::MSTensor::CreateTensor(nh2nc_name + "/output0", trans_in_tensor.DataType(), nchw_shape, nullptr, 0);
  if (nh2nc_tensor == nullptr) {
    MS_LOG(ERROR) << "New NCHW tensor failed when inserting the nhwc2nchw op.";
    return RET_ERROR;
  }
  nh2nc_tensor->SetFormat(Format::NCHW);
  std::vector<mindspore::MSTensor> nh2nc_tensors = {*nh2nc_tensor};
  all_tensors_->push_back(nh2nc_tensor);

  auto nc2nh_name = op_name + "_nc2nh_" + std::to_string(total++);
  auto nc2nh_tensor =
    mindspore::MSTensor::CreateTensor(nc2nh_name + "/output0", trans_in_tensor.DataType(), nhwc_shape, nullptr, 0);
  if (nc2nh_tensor == nullptr) {
    MS_LOG(ERROR) << "New NHWC tensor failed when inserting the nchw2nhwc op.";
    return RET_ERROR;
  }
  nc2nh_tensor->SetFormat(Format::NHWC);
  std::vector<mindspore::MSTensor> nc2nh_tensors = {*nc2nh_tensor};
  all_tensors_->push_back(nc2nh_tensor);

  auto *nh2nc_op = CoreMLPassUtils::CreateNhwc2NchwOp({trans_in_tensor}, nh2nc_tensors, nh2nc_name);
  if (nh2nc_op == nullptr) {
    MS_LOG(ERROR) << "Create nhwc2nchw op " << nh2nc_name << " failed.";
    return RET_ERROR;
  }
  trans_ops->push_back(nh2nc_op);

  auto *nc2nh_op = CoreMLPassUtils::CreateNchw2NhwcOp(nh2nc_tensors, nc2nh_tensors, nc2nh_name);
  if (nc2nh_op == nullptr) {
    MS_LOG(ERROR) << "Create nchw2nhwc op " << nc2nh_name << " failed.";
    return RET_ERROR;
  }
  trans_ops->push_back(nc2nh_op);

  CoreMLPassUtils::UpdateOp(nh2nc_op, in_ops, {nc2nh_op}, {trans_in_tensor}, nh2nc_tensors);
  CoreMLPassUtils::UpdateOp(nc2nh_op, {nh2nc_op}, out_ops, {nh2nc_tensors[0]}, nc2nh_tensors);
  if (op != nullptr) {
    CoreMLPassUtils::UpdateNH2NCTransNodePreOp(op, nh2nc_op, post_op);
  }
  if (post_op != nullptr) {
    CoreMLPassUtils::UpdateNC2NHTransNodePostOp(op, nc2nh_op, post_op, trans_in_tensor);
  } else {
    // post_op == nullptr means this is a graph output; keep the graph output tensor name unchanged.
    auto graph_output_name = trans_in_tensor.Name();
    nc2nh_tensor->SetTensorName(graph_output_name + "_after_" + name_);
  }
  return RET_OK;
}

int CoreMLTransExtendPass::InsertPreNodes(CoreMLOp *op, std::vector<CoreMLOp *> *trans_ops) {
  int ret = RET_OK;
  auto inputs = CoreMLPassUtils::GetNonConstInputs(op);
  for (auto tensor : inputs) {
    if (tensor.Shape().size() < COMM_SHAPE_SIZE) {
      continue;
    }
    // The input tensor can only come from a single op.
    auto pre_op = CoreMLPassUtils::OpInputFromOp(op, tensor);
    if (CoreMLPassUtils::IsNchw2Nhwc(pre_op)) {
      continue;
    }
    // If this tensor is a graph input, pre_op is nullptr.
    ret = InsertTransNode(pre_op, op, tensor, trans_ops);
    if (ret != RET_OK) {
      MS_LOG(ERROR) << "Insert nhwc2nchw op and nchw2nhwc op before op " << op->name() << " failed.";
      return ret;
    }
  }
  return ret;
}

int CoreMLTransExtendPass::InsertPostNodes(CoreMLOp *op, std::vector<CoreMLOp *> *trans_ops) {
  int ret = RET_OK;
  for (size_t idx = 0; idx < op->outputs().size(); idx++) {
    auto out_tensor = op->outputs().at(idx);
    if (out_tensor.Shape().size() < COMM_SHAPE_SIZE) {
      continue;
    }
    if (std::find(subgraph_->outputs().begin(), subgraph_->outputs().end(), out_tensor) != subgraph_->outputs().end()) {
      // The case that the op's out tensor is a graph output.
      ret = InsertTransNode(op, nullptr, op->outputs().at(idx), trans_ops);
      if (ret != RET_OK) {
        MS_LOG(ERROR) << "Insert nhwc2nchw op and nchw2nhwc op after op " << op->name() << " failed.";
        return RET_ERROR;
      }
      // Use the origin output as the last trans op's output to avoid losing the output tensor after transpose
      // fusion. The input of the cur_op's out_op will be updated in the loop below.
      auto last_trans = trans_ops->back();
      auto trans_output = last_trans->outputs();
      auto cur_outputs = op->outputs();
      cur_outputs[idx] = last_trans->outputs()[0];
      trans_output[0] = op->outputs()[idx];
      last_trans->set_outputs(trans_output);
      op->set_outputs(cur_outputs);
    }

    // Besides being graph outputs, the output tensors can also connect to multiple ops.
    for (auto post_op : op->out_ops()) {
      auto post_op_input = post_op->inputs();
      auto it = std::find(post_op_input.begin(), post_op_input.end(), out_tensor);
      if (it == post_op_input.end()) {
        continue;
      }
      auto related_idx = it - post_op_input.begin();
      post_op_input[related_idx] = op->outputs().at(idx);
      post_op->set_inputs(post_op_input);

      if (CoreMLPassUtils::IsNhwc2Nchw(post_op)) {
        continue;
      }
      // The case that the op's out tensor is one of post_op's input tensors.
      ret = InsertTransNode(op, post_op, op->outputs().at(idx), trans_ops);
      if (ret != RET_OK) {
        MS_LOG(ERROR) << "Insert nhwc2nchw op and nchw2nhwc op after op " << op->name() << " failed.";
        return ret;
      }
    }
  }
  return ret;
}

int CoreMLTransExtendPass::Run(CoreMLGraph *subgraph) {
  subgraph_ = subgraph;
  all_ops_ = subgraph_->GetOps();
  all_tensors_ = subgraph_->GetInsertTensors();
  std::vector<CoreMLOp *> insert_ops;
  for (int j = 0; j < REPEAT_TIMES2; ++j) {
    for (size_t i = 0; i < all_ops_->size(); i++) {
      auto op = (*all_ops_)[i];
      auto insert_state = GetInsertState(op);
      insert_ops.clear();
      // Decide whether transposes need to be inserted around this op, then advance the loop index past the newly
      // inserted ops so that iteration continues at the next op of the original vector.
      switch (insert_state) {
        case InsertState::PreInsert: {
          auto ret = InsertPreNodes(op, &insert_ops);
          if (ret != RET_OK) {
            MS_LOG(ERROR) << "Insert nhwc2nchw op and nchw2nhwc op before op " << op->name() << " failed.";
            return RET_ERROR;
          }
          all_ops_->insert(all_ops_->begin() + i, insert_ops.begin(), insert_ops.end());
          i += insert_ops.size();
          break;
        }
        case InsertState::PostInsert: {
          auto ret = InsertPostNodes(op, &insert_ops);
          if (ret != RET_OK) {
            MS_LOG(ERROR) << "Insert nhwc2nchw op and nchw2nhwc op after op " << op->name() << " failed.";
            return RET_ERROR;
          }
          all_ops_->insert(all_ops_->begin() + i + 1, insert_ops.begin(), insert_ops.end());
          i += insert_ops.size();
          break;
        }
        case InsertState::BothInsert: {
          auto ret = InsertPreNodes(op, &insert_ops);
          if (ret != RET_OK) {
            MS_LOG(ERROR) << "Insert nhwc2nchw op and nchw2nhwc op before op " << op->name() << " failed.";
            return RET_ERROR;
          }
          all_ops_->insert(all_ops_->begin() + i, insert_ops.begin(), insert_ops.end());
          i += insert_ops.size();

          insert_ops.clear();
          ret = InsertPostNodes(op, &insert_ops);
          if (ret != RET_OK) {
            MS_LOG(ERROR) << "Insert nhwc2nchw op and nchw2nhwc op after op " << op->name() << " failed.";
            return RET_ERROR;
          }
          all_ops_->insert(all_ops_->begin() + i + 1, insert_ops.begin(), insert_ops.end());
          i += insert_ops.size();
          break;
        }
        default:
          MS_LOG(DEBUG) << "Insert Nothing on op " << op->name();
      }
    }
  }
  return RET_OK;
}
}  // namespace mindspore
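To make the insertion threshold concrete, a standalone arithmetic sketch (it assumes `REPEAT_TIMES2 == 2`, as the name suggests; the tensor counts are illustrative):

```cpp
#include <cstddef>
#include <iostream>

int main() {
  const std::size_t kRepeatTimes2 = 2;
  // Example: an Add with 2 non-const inputs and 1 output feeding 2 out ops,
  // where exactly one input already comes from an nchw2nhwc transpose.
  std::size_t in_out_tensor_num = 2 + 2;  // inputs.size() + max(out_ops.size(), 1, outputs.size())
  std::size_t transpose_tensor_num = 1;   // one nc2nh producer, no nh2nc consumers
  std::size_t graph_input_num = 0, graph_output_num = 0;
  std::size_t connected = in_out_tensor_num - graph_output_num - graph_input_num;
  bool insert_nothing = transpose_tensor_num == 0 ||
                        transpose_tensor_num * kRepeatTimes2 < connected ||
                        transpose_tensor_num == in_out_tensor_num;
  // 1 * 2 < 4, so the pass leaves this op alone; with 2 transpose tensors
  // (2 * 2 >= 4) it would wrap the remaining inputs/outputs instead.
  std::cout << (insert_nothing ? "InsertNone" : "insert transposes") << "\n";
  return 0;
}
```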
@@ -0,0 +1,45 @@
/**
 * Copyright 2020-2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_PASS_COREML_TRANS_EXTEND_PASS_H_
#define MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_PASS_COREML_TRANS_EXTEND_PASS_H_
#include <vector>
#include "src/runtime/delegate/coreml/op/coreml_op.h"
#include "src/runtime/delegate/coreml/pass/coreml_base_pass.h"
namespace mindspore {
enum class InsertState { InsertNone, PreInsert, PostInsert, BothInsert };
class CoreMLTransExtendPass : public CoreMLBasePass {
 public:
  CoreMLTransExtendPass() { name_ = "CoreMLTransExtendPass"; }

  int Run(CoreMLGraph *subgraph) override;

 private:
  InsertState GetInsertState(CoreMLOp *op);
  bool IsNeedInsert(size_t transpose_tensor_num, size_t graph_input_num, size_t graph_output_num,
                    size_t in_out_tensor_num, bool need_pre_insert, bool need_post_insert);
  int InsertPreNodes(CoreMLOp *op, std::vector<CoreMLOp *> *trans_ops);
  int InsertPostNodes(CoreMLOp *op, std::vector<CoreMLOp *> *trans_ops);
  int InsertTransNode(CoreMLOp *op, CoreMLOp *post_op, const mindspore::MSTensor &trans_in_tensor,
                      std::vector<CoreMLOp *> *trans_ops);

 private:
  int total = 0;
  std::vector<CoreMLOp *> *all_ops_ = nullptr;
  std::vector<mindspore::MSTensor *> *all_tensors_ = nullptr;
};
}  // namespace mindspore
#endif  // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_PASS_COREML_TRANS_EXTEND_PASS_H_
@@ -15,8 +15,49 @@
 */

 #include "src/runtime/delegate/delegate_utils.h"
-namespace mindspore::lite {
+#include "nnacl/fp32/pack_fp32.h"
+namespace mindspore {
+namespace lite {
+void PackNHWCToNCHWFp32(const void *src, void *dst, int batches, int plane, int channel) {
+  int hw8 = plane / C8NUM * C8NUM;
+  int batch = plane * channel;
+  for (int n = 0; n < batches; n++) {
+    const float *src_batch = (const float *)src + n * batch;
+    float *dst_batch = reinterpret_cast<float *>(dst) + n * batch;
+    int hw = 0;
+    for (; hw < hw8; hw += C8NUM) {
+      int c = 0;
+#ifdef ENABLE_ARM64
+      for (; c <= channel - C8NUM; c += C8NUM) {
+        const float *src_ptr = src_batch + hw * channel + c;
+        float *dst_ptr = dst_batch + c * plane + hw;
+        Transpose8X8Fp32Arm64(src_ptr, dst_ptr, channel, plane);
+      }
+#endif
+      for (; c < channel; c++) {
+        const float *src_ptr = src_batch + hw * channel + c;
+        float *dst_ptr = dst_batch + c * plane + hw;
+        for (size_t i = 0; i < C8NUM; i++) {
+          dst_ptr[i] = src_ptr[i * channel];
+        }
+      }
+    }
+    for (; hw < plane; hw++) {
+      const float *src_ptr = src_batch + hw * channel;
+      float *dst_ptr = dst_batch + hw;
+      for (size_t i = 0; i < channel; i++) {
+        dst_ptr[i * plane] = src_ptr[i];
+      }
+    }
+  }
+}
+
+void PackNCHWToNHWCFp32(const void *src, void *dst, int batch, int plane, int channel) {
+  return PackNHWCToNCHWFp32(src, dst, batch, channel, plane);
+}
+
 bool IsSubGraphInputTensor(const std::vector<mindspore::MSTensor> &inputs, mindspore::MSTensor input) {
   return std::find(inputs.begin(), inputs.end(), input) != inputs.end();
 }
-}  // namespace mindspore::lite
+}  // namespace lite
+}  // namespace mindspore
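For reference, a self-contained sketch of what the scalar tail of `PackNHWCToNCHWFp32` computes (the reference loop below ignores the ARM64 8x8 tiling fast path; note that `PackNCHWToNHWCFp32` is the same routine with `plane` and `channel` swapped):

```cpp
#include <cstdio>

// Reference semantics only: dst[n][c][hw] = src[n][hw][c].
void PackNHWCToNCHWFp32Ref(const float *src, float *dst, int batches, int plane, int channel) {
  for (int n = 0; n < batches; ++n)
    for (int hw = 0; hw < plane; ++hw)
      for (int c = 0; c < channel; ++c)
        dst[n * plane * channel + c * plane + hw] = src[n * plane * channel + hw * channel + c];
}

int main() {
  const float nhwc[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};  // 1 batch, 4 pixels, 3 channels
  float nchw[12];
  PackNHWCToNCHWFp32Ref(nhwc, nchw, 1, 4, 3);
  for (float v : nchw) printf("%g ", v);  // prints: 0 3 6 9 1 4 7 10 2 5 8 11
  printf("\n");
  return 0;
}
```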
@@ -23,6 +23,10 @@
 namespace mindspore::lite {
 bool IsSubGraphInputTensor(const std::vector<mindspore::MSTensor> &inputs, mindspore::MSTensor input);

+void PackNHWCToNCHWFp32(const void *src, void *dst, int batches, int plane, int channel);
+
+void PackNCHWToNHWCFp32(const void *src, void *dst, int batch, int plane, int channel);
+
 template <typename T>
 std::vector<mindspore::MSTensor> GetGraphInTensors(std::vector<T *> ops, std::vector<size_t> *input_index) {
   std::vector<mindspore::MSTensor> inputs;
@@ -4,6 +4,7 @@ file(GLOB_RECURSE NPU_RUNTIME_SRC
     ${CMAKE_CURRENT_SOURCE_DIR}/*.cc
     ${CMAKE_CURRENT_SOURCE_DIR}/op/*.cc
     ${CMAKE_CURRENT_SOURCE_DIR}/pass/*.cc
+    ${CMAKE_CURRENT_SOURCE_DIR}/../delegate_utils.cc
     )
 add_library(hiai SHARED IMPORTED)
 set_target_properties(hiai PROPERTIES IMPORTED_LOCATION
@@ -286,38 +286,10 @@ NPUOp *NPUDelegate::GetOP(kernel::Kernel *kernel, const schema::Primitive *primi
   return npu_op;
 }

-std::vector<mindspore::MSTensor> GraphOutTensors(const std::vector<NPUOp *> &ops,
-                                                 DelegateModel<schema::Primitive> *model, KernelIter from,
-                                                 KernelIter end) {
-  auto out_tensors = lite::GetGraphOutTensors(ops);
-  std::vector<mindspore::MSTensor> all_out_tensors;
-  for (auto op : ops) {
-    for (const auto &out_tensor : op->outputs()) {
-      if (find(out_tensors.begin(), out_tensors.end(), out_tensor) == out_tensors.end()) {
-        all_out_tensors.push_back(out_tensor);
-      }
-    }
-  }
-
-  for (auto iter = model->BeginKernelIterator(); iter != model->EndKernelIterator(); iter++) {
-    if (iter >= from && iter <= end) {
-      continue;
-    }
-    // The input of other kernels is the output of the current subgraph kernel.
-    for (const auto &in_tensor : (*iter)->inputs()) {
-      if (find(all_out_tensors.begin(), all_out_tensors.end(), in_tensor) != all_out_tensors.end() &&
-          find(out_tensors.begin(), out_tensors.end(), in_tensor) == out_tensors.end()) {
-        out_tensors.push_back(in_tensor);
-      }
-    }
-  }
-  return out_tensors;
-}
-
 kernel::Kernel *NPUDelegate::CreateNPUGraph(const std::vector<NPUOp *> &ops, DelegateModel<schema::Primitive> *model,
                                             KernelIter from, KernelIter end) {
   auto in_tensors = lite::GetGraphInTensors(ops, nullptr);
-  auto out_tensors = GraphOutTensors(ops, model, from, end);
+  auto out_tensors = lite::GraphOutTensors<NPUOp>(ops, model, from, end);
   auto graph_kernel = new (std::nothrow) NPUGraph(ops, npu_manager_, in_tensors, out_tensors);
   if (graph_kernel == nullptr) {
     MS_LOG(DEBUG) << "New NPU Graph failed.";
@@ -326,12 +298,14 @@ kernel::Kernel *NPUDelegate::CreateNPUGraph(const std::vector<NPUOp *> &ops, Del
   // 1. For every op, find pre and next ops
   auto ret = graph_kernel->FindPreNextOps();
   if (ret != RET_OK) {
+    delete graph_kernel;
     MS_LOG(DEBUG) << "NPU Graph find input and output ops for every op failed.";
     return nullptr;
   }
   // 2. Pass
   ret = pass_manager_->RunPass(graph_kernel);
   if (ret != RET_OK) {
+    delete graph_kernel;
     MS_LOG(DEBUG) << "NPU Graph run pass failed. This function mainly solves the problem that the format is "
                   "inconsistent and requires interpolation transpose operators.";
     return nullptr;
@@ -339,6 +313,7 @@ kernel::Kernel *NPUDelegate::CreateNPUGraph(const std::vector<NPUOp *> &ops, Del
   // 3. NPUGraph init, create subgraph_kernel and transpose_kernel
   ret = graph_kernel->Init();
   if (ret != RET_OK) {
+    delete graph_kernel;
     MS_LOG(DEBUG) << "NPU subgraph Init failed.";
     return nullptr;
   }
@@ -17,6 +17,7 @@
 #include "src/runtime/delegate/npu/op/convolution_base_npu.h"
 #include "src/runtime/delegate/npu/npu_converter_utils.h"
 #include "src/runtime/delegate/npu/transpose_kernel.h"
+#include "src/runtime/delegate/delegate_utils.h"
 #include "nnacl/int8/pack_int8.h"

 namespace mindspore {
@@ -72,7 +73,8 @@ int ConvolutionBaseNPUOp::InitWeightConst(const std::vector<mindspore::MSTensor>
     // weight fp16->fp32
     Float16ToFloat32(reinterpret_cast<const float16_t *>(origin_weight), reinterpret_cast<float *>(fp32_weight_),
                      inputs[1].ElementNum());
-    PackNHWCToNCHWFp32(fp32_weight_, nchw_weight_, w_shape[NHWC_N], w_shape[NHWC_H] * w_shape[NHWC_W], w_shape[NHWC_C]);
+    lite::PackNHWCToNCHWFp32(fp32_weight_, nchw_weight_, w_shape[NHWC_N], w_shape[NHWC_H] * w_shape[NHWC_W],
+                             w_shape[NHWC_C]);
 #else
     MS_LOG(ERROR) << "This platform does not support fp16.";
     FreeTmpWeight();
@@ -84,8 +86,8 @@ int ConvolutionBaseNPUOp::InitWeightConst(const std::vector<mindspore::MSTensor>
       MS_LOG(ERROR) << "Malloc buffer failed.";
       return RET_ERROR;
     }
-    PackNHWCToNCHWFp32(origin_weight, nchw_weight_, w_shape[NHWC_N], w_shape[NHWC_H] * w_shape[NHWC_W],
-                       w_shape[NHWC_C]);
+    lite::PackNHWCToNCHWFp32(origin_weight, nchw_weight_, w_shape[NHWC_N], w_shape[NHWC_H] * w_shape[NHWC_W],
+                             w_shape[NHWC_C]);
   } else if (inputs[1].DataType() == DataType::kNumberTypeInt8) {
     nchw_weight_ = malloc(inputs[1].ElementNum() * sizeof(int8_t));
     if (nchw_weight_ == nullptr) {
@@ -17,46 +17,9 @@
 #include "src/runtime/delegate/npu/transpose_kernel.h"
 #include "src/runtime/delegate/npu/npu_converter_utils.h"
 #include "src/runtime/delegate/npu/op/npu_op.h"
+#include "src/runtime/delegate/delegate_utils.h"
 #include "nnacl/fp32/pack_fp32.h"
 namespace mindspore {
-void PackNHWCToNCHWFp32(const void *src, void *dst, int batches, int plane, int channel) {
-  int hw8 = plane / C8NUM * C8NUM;
-  int batch = plane * channel;
-  for (int n = 0; n < batches; n++) {
-    const float *src_batch = (const float *)src + n * batch;
-    float *dst_batch = reinterpret_cast<float *>(dst) + n * batch;
-    int hw = 0;
-    for (; hw < hw8; hw += C8NUM) {
-      int c = 0;
-#ifdef ENABLE_ARM64
-      for (; c <= channel - C8NUM; c += C8NUM) {
-        const float *src_ptr = src_batch + hw * channel + c;
-        float *dst_ptr = dst_batch + c * plane + hw;
-        Transpose8X8Fp32Arm64(src_ptr, dst_ptr, channel, plane);
-      }
-#endif
-      for (; c < channel; c++) {
-        const float *src_ptr = src_batch + hw * channel + c;
-        float *dst_ptr = dst_batch + c * plane + hw;
-        for (size_t i = 0; i < C8NUM; i++) {
-          dst_ptr[i] = src_ptr[i * channel];
-        }
-      }
-    }
-    for (; hw < plane; hw++) {
-      const float *src_ptr = src_batch + hw * channel;
-      float *dst_ptr = dst_batch + hw;
-      for (size_t i = 0; i < channel; i++) {
-        dst_ptr[i * plane] = src_ptr[i];
-      }
-    }
-  }
-}
-
-void PackNCHWToNHWCFp32(const void *src, void *dst, int batch, int plane, int channel) {
-  return PackNHWCToNCHWFp32(src, dst, batch, channel, plane);
-}
-
 int TransposeNPUKernel::Execute() {
   if (perm_ != NHWC2NCHW_PERM && perm_ != NCHW2NHWC_PERM) {
     MS_LOG(ERROR) << "NPU transpose op only supports nhwc->nchw or nchw->nhwc.";
@@ -74,9 +37,9 @@ int TransposeNPUKernel::Execute() {
   auto output = out_tensor.MutableData();
   MS_ASSERT(output);
   if (perm_ == NHWC2NCHW_PERM) {
-    PackNHWCToNCHWFp32(input, output, shape[NHWC_N], shape[NHWC_H] * shape[NHWC_W], shape[NHWC_C]);
+    lite::PackNHWCToNCHWFp32(input, output, shape[NHWC_N], shape[NHWC_H] * shape[NHWC_W], shape[NHWC_C]);
   } else if (perm_ == NCHW2NHWC_PERM) {
-    PackNCHWToNHWCFp32(input, output, shape[NCHW_N], shape[NCHW_H] * shape[NCHW_W], shape[NCHW_C]);
+    lite::PackNCHWToNHWCFp32(input, output, shape[NCHW_N], shape[NCHW_H] * shape[NCHW_W], shape[NCHW_C]);
   } else {
     MS_LOG(ERROR) << "NPU transpose op only supports nhwc->nchw or nchw->nhwc.";
     return RET_ERROR;
@@ -52,6 +52,9 @@
 #if GPU_TENSORRT
 #include "src/extendrt/delegate/tensorrt/tensorrt_delegate.h"
 #endif
+#ifdef ENABLE_COREML
+#include "src/runtime/delegate/coreml/coreml_delegate.h"
+#endif
 #include "src/runtime/runtime_convert.h"
 #include "extendrt/mindir_loader/model_loader.h"
@@ -820,21 +823,38 @@ int LiteSession::CreateNPUDelegate() {
   return RET_OK;
 }

+int LiteSession::CreateCoreMLDelegate() {
+#ifdef ENABLE_COREML
+  delegate_ = std::make_shared<CoreMLDelegate>();
+  if (delegate_ == nullptr) {
+    MS_LOG(ERROR) << "New delegate_ failed";
+    return RET_ERROR;
+  }
+  delegate_device_type_ = DT_CPU;
+  this->context_->delegate = delegate_;
+#endif
+  return RET_OK;
+}
+
 int LiteSession::DelegateInit() {
 #ifndef DELEGATE_CLIP
   if (context_->delegate != nullptr) {
     delegate_ = context_->delegate;
     delegate_device_type_ = -1;
   } else {
+    auto ret = CreateCoreMLDelegate();
+    if (ret != RET_OK) {
+      return ret;
+    }
     if (context_->IsDeviceTypeEnabled(DT_NPU)) {
-      auto ret = CreateNPUDelegate();
+      ret = CreateNPUDelegate();
       if (ret != RET_OK) {
         return ret;
       }
     }

     if (context_->IsDeviceTypeEnabled(DT_GPU)) {
-      auto ret = CreateTensorRTDelegate();
+      ret = CreateTensorRTDelegate();
       if (ret != RET_OK) {
         return ret;
       }
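For orientation, a sketch of what this ordering means for callers (the `Build` overload and the `SetDelegate` setter follow the public MindSpore Lite C++ API as I recall it; treat the exact signatures, header paths, and model path as assumptions):

```cpp
#include <memory>
#include "include/api/context.h"
#include "include/api/model.h"

int main() {
  auto context = std::make_shared<mindspore::Context>();
  // If the caller attaches its own delegate here, DelegateInit() adopts it as-is
  // (delegate_device_type_ = -1) and skips the CoreML/NPU/TensorRT probing:
  // context->SetDelegate(my_delegate);  // hypothetical user-provided delegate
  mindspore::Model model;
  // Otherwise CreateCoreMLDelegate() runs first (a no-op in builds without
  // ENABLE_COREML), then NPU and TensorRT follow if those device types are enabled.
  auto status = model.Build("model.ms", mindspore::kMindIR, context);  // illustrative call
  return status == mindspore::kSuccess ? 0 : 1;
}
```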
@@ -150,6 +150,7 @@ class LiteSession {
   int ContextInit(InnerContext *context);
   int CreateTensorRTDelegate();
   int CreateNPUDelegate();
+  int CreateCoreMLDelegate();
   int DelegateInit();
   int InitGPURuntime();
@@ -367,7 +367,8 @@ npu_files=()
 while IFS='' read -r line; do npu_files+=("$line"); done < <(ls mindspore/lite/src/runtime/delegate/npu/*.cc)
 while IFS='' read -r line; do npu_files+=("$line"); done < <(ls mindspore/lite/src/runtime/delegate/npu/op/*.cc)
 while IFS='' read -r line; do npu_files+=("$line"); done < <(ls mindspore/lite/src/runtime/delegate/npu/pass/*.cc)
-
+npu_others_files=("mindspore/lite/src/runtime/delegate/delegate_utils.cc")
+npu_files=("${npu_files[@]}" "${npu_others_files[@]}")
 # shellcheck disable=SC2068
 for file in ${npu_files[@]}; do
   file=$(echo ${file} | awk -F '/' '{print $NF}')
@@ -0,0 +1,95 @@
// Copyright (c) 2017, Apple Inc. All rights reserved.
//
// Use of this source code is governed by a BSD-3-clause license that can be
// found in LICENSE.txt or at https://opensource.org/licenses/BSD-3-Clause

syntax = "proto3";
option optimize_for = LITE_RUNTIME;

import public "FeatureTypes.proto";

package CoreML.Specification;

/**
 * A mapping from a string
 * to a 64-bit integer.
 */
message StringToInt64Map {
  map<string, int64> map = 1;
}

/**
 * A mapping from a 64-bit integer
 * to a string.
 */
message Int64ToStringMap {
  map<int64, string> map = 1;
}

/**
 * A mapping from a string
 * to a double-precision floating point number.
 */
message StringToDoubleMap {
  map<string, double> map = 1;
}

/**
 * A mapping from a 64-bit integer
 * to a double-precision floating point number.
 */
message Int64ToDoubleMap {
  map<int64, double> map = 1;
}

/**
 * A vector of strings.
 */
message StringVector {
  repeated string vector = 1;
}

/**
 * A vector of 64-bit integers.
 */
message Int64Vector {
  repeated int64 vector = 1;
}

/**
 * A vector of floating point numbers.
 */
message FloatVector {
  repeated float vector = 1;
}

/**
 * A vector of double-precision floating point numbers.
 */
message DoubleVector {
  repeated double vector = 1;
}

/**
 * A range of int64 values
 */
message Int64Range {
  int64 minValue = 1;
  int64 maxValue = 2;
}

/**
 * A set of int64 values
 */
message Int64Set {
  repeated int64 values = 1;
}

/**
 * A range of double values
 */
message DoubleRange {
  double minValue = 1;
  double maxValue = 2;
}
@@ -0,0 +1,224 @@
// Copyright (c) 2017, Apple Inc. All rights reserved.
//
// Use of this source code is governed by a BSD-3-clause license that can be
// found in LICENSE.txt or at https://opensource.org/licenses/BSD-3-Clause

syntax = "proto3";
option optimize_for = LITE_RUNTIME;

package CoreML.Specification;

/**
 * The 64-bit integer feature type.
 */
message Int64FeatureType {}

/**
 * The double-precision floating point number feature type.
 */
message DoubleFeatureType {}

/**
 * The string feature type.
 */
message StringFeatureType {}

message SizeRange {
  uint64 lowerBound = 1;
  int64 upperBound = 2;  // a negative value means unbounded; otherwise upperBound is included in the range
}

/**
 * The image feature type.
 */
message ImageFeatureType {
  // Assumes raw (decompressed) format
  enum ColorSpace {
    INVALID_COLOR_SPACE = 0;
    GRAYSCALE = 10;  // 8 bits per pixel
    RGB = 20;        // 32 bits per pixel: RGBA with A channel ignored
    BGR = 30;        // 32 bits per pixel: BGRA with A channel ignored
  }

  message ImageSize {
    uint64 width = 1;
    uint64 height = 2;
  }

  message EnumeratedImageSizes {
    repeated ImageSize sizes = 1;
  }

  message ImageSizeRange {
    SizeRange widthRange = 1;
    SizeRange heightRange = 2;
  }

  // The required or default image size is width x height
  //
  // If specificationVersion <= 2 or SizeFlexibility is empty,
  // width x height is the required fixed image size
  //
  // If SizeFlexibility is present, width x height indicate a "default"
  // image size which must be consistent with the flexibility specified

  int64 width = 1;
  int64 height = 2;

  // For specification version >= 3 you can specify image size flexibility.

  oneof SizeFlexibility {

    // Use enumeratedSizes for a set of distinct fixed sizes
    // e.g. portrait or landscape: [80 x 100, 100 x 80]
    //
    // If the width x height fields above are specified then they must be
    // one of the sizes listed.
    //
    // If width and height are not specified above then the default width
    // and height will be enumeratedSizes[0]
    //
    // Must be non-empty

    EnumeratedImageSizes enumeratedSizes = 21;

    // Use imageSizeRange to allow for ranges of values
    // e.g. any image greater than 10 x 20: [10..<max] x [20..<max]
    //
    // If width and height are specified above they must fall in the range
    // specified in imageSizeRange. They will be treated as the default size.
    //
    // If width and height are not specified above then the default width
    // and height will be imageSizeRange.widthRange.lowerBound x imageSizeRange.heightRange.lowerBound

    ImageSizeRange imageSizeRange = 31;
  }

  ColorSpace colorSpace = 3;
}

/**
 * The array feature type.
 */
message ArrayFeatureType {

  enum ArrayDataType {
    INVALID_ARRAY_DATA_TYPE = 0;
    FLOAT32 = 65568;  // 0x10000 | 32
    DOUBLE = 65600;   // 0x10000 | 64
    INT32 = 131104;   // 0x20000 | 32
  }

  // The required or default shape
  //
  // If specificationVersion <= 2 or ShapeFlexibility is empty,
  // shape is the required fixed shape
  //
  // If ShapeFlexibility is present, shape indicates a "default"
  // shape which must be consistent with the flexibility specified

  repeated int64 shape = 1;

  ArrayDataType dataType = 2;

  message Shape {
    repeated int64 shape = 1;
  }

  message EnumeratedShapes {
    repeated Shape shapes = 1;
  }

  message ShapeRange {
    // sizeRanges.size() must be length 1 or 3
    // sizeRanges[d] specifies the allowed range for dimension d
    repeated SizeRange sizeRanges = 1;
  }

  // For specification version >= 3 you can specify shape flexibility.

  oneof ShapeFlexibility {

    // Use enumeratedShapes for a set of distinct fixed shapes
    //
    // If the shape field is specified then it must be
    // one of the enumerated shapes.
    //
    // If shape is not specified, the "default" shape will be considered
    // enumeratedShapes[0]
    //
    // Must be non-empty

    EnumeratedShapes enumeratedShapes = 21;

    // Use shapeRange to allow the size of each dimension to vary within
    // independently specified ranges
    //
    // If you specify shape above it must fall in the range
    // specified in shapeRanges. It will be treated as the default shape.
    //
    // If you don't specify shape above then the default shape will
    // have shape[d] = shapeRange.sizeRanges[d].lowerBound

    ShapeRange shapeRange = 31;

  }

  oneof defaultOptionalValue {
    int32 intDefaultValue = 41;
    float floatDefaultValue = 51;
    double doubleDefaultValue = 61;
  }

}

/**
 * The dictionary feature type.
 */
message DictionaryFeatureType {
  /**
   * Key/value type tags, with the following restrictions:
   * - ``keyType`` must be a hashable type
   * - ``valueType`` is assumed to be a ``double``
   */
  oneof KeyType {
    Int64FeatureType int64KeyType = 1;
    StringFeatureType stringKeyType = 2;
  }
}

/**
 * The Sequence feature type.
 */
message SequenceFeatureType {

  /**
   * Currently only categorical int64 and String sequences are supported
   */
  oneof Type {
    Int64FeatureType int64Type = 1;
    StringFeatureType stringType = 3;
  }

  // Range of allowed size/length/count of sequence
  SizeRange sizeRange = 101;
}

/**
 * A feature, which may be optional.
 */
message FeatureType {
  oneof Type {
    Int64FeatureType int64Type = 1;
    DoubleFeatureType doubleType = 2;
    StringFeatureType stringType = 3;
    ImageFeatureType imageType = 4;
    ArrayFeatureType multiArrayType = 5;
    DictionaryFeatureType dictionaryType = 6;
    SequenceFeatureType sequenceType = 7;
  }

  bool isOptional = 1000;
}
@@ -0,0 +1,164 @@
// Copyright (c) 2017, Apple Inc. All rights reserved.
//
// Use of this source code is governed by a BSD-3-clause license that can be
// found in LICENSE.txt or at https://opensource.org/licenses/BSD-3-Clause

syntax = "proto3";
option optimize_for = LITE_RUNTIME;

import public "NeuralNetwork.proto";

package CoreML.Specification;

/**
 * A feature description,
 * consisting of a name, short description, and type.
 */
message FeatureDescription {
  string name = 1;
  string shortDescription = 2;
  FeatureType type = 3;
}

/**
 * Model metadata,
 * consisting of a short description, a version string,
 * an author, a license, and any other user-defined
 * key/value meta data.
 */
message Metadata {
  string shortDescription = 1;
  string versionString = 2;
  string author = 3;
  string license = 4;
  map<string, string> userDefined = 100;
}

/**
 * A description of a model,
 * consisting of descriptions of its input and output features.
 * Both regressor and classifier models require the name of the
 * primary predicted output feature (``predictedFeatureName``).
 * Classifier models can specify the output feature containing
 * probabilities for the predicted classes
 * (``predictedProbabilitiesName``).
 */
message ModelDescription {
  repeated FeatureDescription input = 1;
  repeated FeatureDescription output = 10;

  // [Required for regressor and classifier models]: the name
  // to give to an output feature containing the prediction.
  string predictedFeatureName = 11;

  // [Optional for classifier models]: the name to give to an
  // output feature containing a dictionary mapping class
  // labels to their predicted probabilities. If not specified,
  // the dictionary will not be returned by the model.
  string predictedProbabilitiesName = 12;

  repeated FeatureDescription trainingInput = 50;

  Metadata metadata = 100;
}

message SerializedModel {
  // Identifier whose content describes the model type of the serialized protocol buffer message.
  string identifier = 1;

  // Must be a valid serialized protocol buffer of the above specified type.
  bytes model = 2;
}

/**
 * A Core ML model,
 * consisting of a specification version,
 * a model description, and a model type.
 *
 * Core ML model compatibility is indicated by
 * a monotonically increasing specification version number,
 * which is incremented any time a backward-incompatible change is made
 * (this is functionally equivalent to the MAJOR version number
 * described by `Semantic Versioning 2.0.0 <http://semver.org/>`_).
 *
 * Specification Versions : OS Availability (Core ML Version)
 *
 * 1 : iOS 11, macOS 10.13, tvOS 11, watchOS 4 (Core ML 1)
 * - Feedforward & Recurrent Neural Networks
 * - General Linear Models
 * - Tree Ensembles
 * - Support Vector Machines
 * - Pipelines
 * - Feature Engineering
 *
 * 2 : iOS 11.2, macOS 10.13.2, tvOS 11.2, watchOS 4.2 (Core ML 1.2)
 * - Custom Layers for Neural Networks
 * - Float 16 support for Neural Network layers
 *
 * 3 : iOS 12, macOS 10.14, tvOS 12, watchOS 5 (Core ML 2)
 * - Flexible shapes and image sizes
 * - Categorical sequences
 * - Core ML Vision Feature Print, Text Classifier, Word Tagger
 * - Non Max Suppression
 * - Crop and Resize Bilinear NN layers
 * - Custom Models
 *
 * 4 : iOS 13, macOS 10.15, tvOS 13, watchOS 6 (Core ML 3)
 * - Updatable models
 * - Exact shape / general rank mapping for neural networks
 * - Large expansion of supported neural network layers
 *   - Generalized operations
 *   - Control flow
 *   - Dynamic layers
 *   - See NeuralNetwork.proto
 * - Nearest Neighbor Classifier
 * - Sound Analysis Preprocessing
 * - Recommender
 * - Linked Model
 * - NLP Gazetteer
 * - NLP WordEmbedding
 *
 * 5 : iOS 14, macOS 11, tvOS 14, watchOS 7 (Core ML 4)
 * - Model Deployment
 * - Model Encryption
 * - Unified converter API with PyTorch and TensorFlow 2 support in coremltools 4
 * - MIL builder for neural networks and composite ops in coremltools 4
 * - New layers in neural network:
 *   - CumSum
 *   - OneHot
 *   - ClampedReLu
 *   - ArgSort
 *   - SliceBySize
 *   - Convolution3D
 *   - Pool3D
 *   - Bilinear Upsample with align corners and fractional factors
 *   - PixelShuffle
 *   - MatMul with int8 weights and int8 activations
 *   - Concat interleave
 *   - See NeuralNetwork.proto
 * - Enhanced Xcode model view with interactive previews
 * - Enhanced Xcode Playground support for Core ML models
 *
 */
message Model {
  int32 specificationVersion = 1;
  ModelDescription description = 2;

  /*
   * The following model types support on-device update:
   *
   * - NeuralNetworkClassifier
   * - NeuralNetworkRegressor
   * - NeuralNetwork
   * - KNearestNeighborsClassifier
   */
  bool isUpdatable = 10;

  // start at 200 here
  // model specific parameters:
  oneof Type {
    // generic models start at 500
    NeuralNetwork neuralNetwork = 500;
  }
}
(File diff suppressed because it is too large.)
@@ -0,0 +1,52 @@
// Copyright (c) 2017, Apple Inc. All rights reserved.
//
// Use of this source code is governed by a BSD-3-clause license that can be
// found in LICENSE.txt or at https://opensource.org/licenses/BSD-3-Clause

syntax = "proto3";
option optimize_for = LITE_RUNTIME;

import public "DataStructures.proto";

package CoreML.Specification;

/**
 * Int64 parameter,
 * consisting of a default int64 value, and an allowed range or set of values.
 * The value is unbounded if AllowedValues is not set.
 */
message Int64Parameter {
  int64 defaultValue = 1;
  oneof AllowedValues {
    Int64Range range = 10;
    Int64Set set = 11;
  }
}

/**
 * Double parameter,
 * consisting of a default double value, and an allowed range of values.
 * The value is unbounded if AllowedValues is not set.
 */
message DoubleParameter {
  double defaultValue = 1;
  oneof AllowedValues {
    DoubleRange range = 10;
  }
}

/**
 * String parameter.
 * A default string value must be provided.
 */
message StringParameter {
  string defaultValue = 1;
}

/**
 * Bool parameter.
 * A default bool value must be provided.
 */
message BoolParameter {
  bool defaultValue = 1;
}