forked from mindspore-Ecosystem/mindspore

support sd3403 davinci

parent 04338cca5b
commit 506417d92b

@@ -445,7 +445,7 @@ if(PLATFORM_ARM64)
                 COMPONENT ${RUNTIME_COMPONENT_NAME})
     endif()
 elseif(TARGET_MIX210)
-    if(${MSLITE_REGISTRY_DEVICE} STREQUAL "SD3403")
+    if(${MSLITE_REGISTRY_DEVICE} STREQUAL "SD3403" AND (NOT MSLITE_ENABLE_ACL))
         install(FILES ${TOP_DIR}/mindspore/lite/build/tools/benchmark/dpico/${DPICO_ACL_ADAPTER_LIB_NAME}.so
                 DESTINATION ${PROVIDERS_LIB_DIR}/${MSLITE_REGISTRY_DEVICE}
                 COMPONENT ${RUNTIME_COMPONENT_NAME})

@@ -249,7 +249,7 @@ elseif(PLATFORM_ARM32)
 elseif(WIN32)
     set(MSLITE_GPU_BACKEND "off")
 else()
-    if(${MSLITE_REGISTRY_DEVICE} STREQUAL "SD3403")
+    if(${MSLITE_REGISTRY_DEVICE} STREQUAL "SD3403" AND (NOT MSLITE_ENABLE_ACL))
         set(MSLITE_ENABLE_DPICO_ATC_ADAPTER on)
     endif()
     if(MSLITE_GPU_BACKEND STREQUAL "")

@@ -463,8 +463,19 @@ if(MSLITE_ENABLE_ACL)
     else()
         set(ASCEND_PATH /usr/local/Ascend)
     endif()
-    set(ASCEND_CANN_RUNTIME_PATH ${ASCEND_PATH}/latest/lib64)
-    set(ASCEND_TOOLKIT_RUNTIME_PATH ${ASCEND_PATH}/ascend-toolkit/latest/lib64)
+    if("${MSLITE_REGISTRY_DEVICE}" STREQUAL "SD3403")
+        if(PLATFORM_ARM64)
+            set(ASCEND_CANN_RUNTIME_PATH ${ASCEND_PATH}/ascend-toolkit/latest/arm64-lmixlinux200/lib64)
+        elseif(PLATFORM_ARM32 OR WIN32)
+            message(FATAL_ERROR "SD3403 compilation only supports x86 and arm64 platform")
+        else()
+            add_compile_definitions(SUPPORT_SD3403_DAVINCI)
+            set(ASCEND_CANN_RUNTIME_PATH ${ASCEND_PATH}/ascend-toolkit/latest/x86_64-linux/lib64)
+        endif()
+    else()
+        set(ASCEND_CANN_RUNTIME_PATH ${ASCEND_PATH}/latest/lib64)
+        set(ASCEND_TOOLKIT_RUNTIME_PATH ${ASCEND_PATH}/ascend-toolkit/latest/lib64)
+    endif()
 endif()
 
 set(PKG_NAME_PREFIX mindspore-lite-${VERSION_STR})

@@ -559,7 +559,7 @@ endif()
 
 ########################## build optimize and float16 library #################################
 if(PLATFORM_ARM)
-    if(PLATFORM_ARM64 AND NOT TARGET_HIMIX AND NOT MACHINE_LINUX_ARM64)
+    if(PLATFORM_ARM64 AND NOT TARGET_HIMIX AND NOT TARGET_MIX210 AND NOT MACHINE_LINUX_ARM64)
         target_link_libraries(mindspore-lite cpu_opt_kernel_mid nnacl_optimize_mid)
         target_link_libraries(mindspore-lite_static cpu_opt_kernel_mid nnacl_optimize_mid)
     endif()

@@ -57,12 +57,16 @@ AclModelOptions::AclModelOptions(const std::shared_ptr<Context> &context) {
   fusion_switch_cfg_path_ = ascend_info->GetFusionSwitchConfigPath();
   device_id_ = ascend_info->GetDeviceID();
   buffer_optimize_mode_ = ascend_info->GetBufferOptimizeMode();
+#ifndef SUPPORT_SD3403_DAVINCI
   const char *soc_name = aclrtGetSocName();
   if (soc_name == nullptr) {
     MS_LOG(WARNING) << "Get soc version failed.";
     return;
   }
   soc_version_ = soc_name;
+#else
+  soc_version_ = "OPTG";
+#endif
 }
 
 void AclModelOptions::RenameInput(const std::vector<std::string> &input_names) {

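Context note: the hunk above pairs with the add_compile_definitions(SUPPORT_SD3403_DAVINCI) added in the CMake change earlier in this commit. Below is a minimal standalone sketch of that pattern, illustrative only: ResolveSocVersion and the scaffolding around it are not part of the patch; only aclrtGetSocName() and the macro name come from it.

// Sketch: compile-time selection of the SoC version string.
// On non-SD3403 builds this links against the Ascend CL runtime (libascendcl).
#include <string>

#ifndef SUPPORT_SD3403_DAVINCI
extern "C" const char *aclrtGetSocName();  // Ascend CL runtime query
#endif

std::string ResolveSocVersion() {
#ifndef SUPPORT_SD3403_DAVINCI
  // Generic Ascend targets: ask the runtime which SoC is present.
  const char *soc_name = aclrtGetSocName();
  return soc_name != nullptr ? std::string(soc_name) : std::string();
#else
  // SD3403 DaVinci builds: the SoC version is fixed at compile time.
  return "OPTG";
#endif
}
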
@@ -6,7 +6,15 @@ aux_source_directory(src ACL_SRC)
 add_library(ascend_kernel_mid OBJECT ${ACL_SRC})
 
 add_dependencies(ascend_kernel_mid fbs_inner_src)
 
-target_link_libraries(ascend_kernel_mid ${ge_graph} ${ge_compiler}
+if("${MSLITE_REGISTRY_DEVICE}" STREQUAL "SD3403" AND PLATFORM_ARM64)
+    find_library(ge_graph libgraph.so ${ASCEND_CANN_RUNTIME_PATH} ${ASCEND_TOOLKIT_RUNTIME_PATH})
+    find_library(acl libascendcl.so ${ASCEND_CANN_RUNTIME_PATH} ${ASCEND_TOOLKIT_RUNTIME_PATH})
+    find_library(acl_retr libacl_retr.so ${ASCEND_CANN_RUNTIME_PATH} ${ASCEND_TOOLKIT_RUNTIME_PATH})
+    find_library(acl_cblas libacl_cblas.so ${ASCEND_CANN_RUNTIME_PATH} ${ASCEND_TOOLKIT_RUNTIME_PATH})
+    find_library(acl_runtime libruntime.so ${ASCEND_CANN_RUNTIME_PATH} ${ASCEND_TOOLKIT_RUNTIME_PATH})
+    target_link_libraries(ascend_kernel_mid ${ge_graph} ${acl} ${acl_retr} ${acl_cblas} ${acl_runtime})
+else()
+    target_link_libraries(ascend_kernel_mid ${ge_graph} ${ge_compiler}
         ${acl_retr} ${acl_cblas} ${acl_dvpp} ${acl_runtime} ${libplatform}
         ${libcompress} ${libopskernel} ${libaicore_utils} ${libaicpu_engine_common} ${acl})
+endif()

@@ -140,7 +140,7 @@ void Convolution1x1Int8CPUKernel::CheckSupportOptimize() {
   support_optimize_ = false;
   matmul_func_ = MatMulInt8_4x16_r;
 #if defined(ENABLE_ARM64)
-#if !defined(SUPPORT_NNIE) && !defined(MACHINE_LINUX_ARM64)
+#if !defined(SUPPORT_NNIE) && !defined(SUPPORT_34XX) && !defined(MACHINE_LINUX_ARM64)
   if (mindspore::lite::IsSupportSDot()) {
     support_optimize_ = true;
     matmul_func_ = MatMulDpInt8_optimize_handler;

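Context note: the same !defined(SUPPORT_34XX) guard is added to every int8 convolution/matmul kernel in the hunks below, so the SDot fast path is only compiled for generic ARM64 builds. A self-contained sketch of that selection logic follows; the stub names are illustrative, only the preprocessor conditions and the IsSupportSDot() check come from the patch.

// Sketch: how CheckSupportOptimize() picks the matmul routine after this patch.
using MatmulFuncStub = void (*)();

void MatMulGenericStub() {}   // stands in for the portable int8 matmul handler
void MatMulSdotStub() {}      // stands in for the SDot-optimized handler

bool IsSupportSDotStub() { return true; }  // stands in for mindspore::lite::IsSupportSDot()

void CheckSupportOptimizeSketch(bool *support_optimize, MatmulFuncStub *matmul_func) {
  *support_optimize = false;
  *matmul_func = MatMulGenericStub;
#if defined(ENABLE_ARM64)
#if !defined(SUPPORT_NNIE) && !defined(SUPPORT_34XX) && !defined(MACHINE_LINUX_ARM64)
  // Only generic ARM64 builds may take the SDot fast path; SD3403 (SUPPORT_34XX),
  // NNIE, and MACHINE_LINUX_ARM64 builds always keep the portable kernel.
  if (IsSupportSDotStub()) {
    *support_optimize = true;
    *matmul_func = MatMulSdotStub;
  }
#endif
#endif
}
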
@@ -148,7 +148,7 @@ void Convolution1x1Int8CPUKernel::CheckSupportOptimize() {
 #endif
   support_optimize_ = false;
   matmul_func_ = nullptr;
-#if !defined(SUPPORT_NNIE) && !defined(MACHINE_LINUX_ARM64)
+#if !defined(SUPPORT_NNIE) && !defined(SUPPORT_34XX) && !defined(MACHINE_LINUX_ARM64)
   }
 #endif
 #endif

@@ -35,7 +35,7 @@ void ConvolutionInt8CPUKernel::CheckSupportOptimize() {
 #endif
 
 #if defined(ENABLE_ARM64)
-#if !defined(SUPPORT_NNIE) && !defined(MACHINE_LINUX_ARM64)
+#if !defined(SUPPORT_NNIE) && !defined(SUPPORT_34XX) && !defined(MACHINE_LINUX_ARM64)
   if (mindspore::lite::IsSupportSDot()) {
     matmul_func_ = MatMulRInt8_optimize_handler;
     support_optimize_ = true;

@@ -43,7 +43,7 @@ void ConvolutionInt8CPUKernel::CheckSupportOptimize() {
 #endif
   tile_num_ = 4;
   support_optimize_ = false;
-#if !defined(SUPPORT_NNIE) && !defined(MACHINE_LINUX_ARM64)
+#if !defined(SUPPORT_NNIE) && !defined(SUPPORT_34XX) && !defined(MACHINE_LINUX_ARM64)
   }
 #endif
 #endif

@@ -117,7 +117,7 @@ void DeConvInt8CPUKernel::CheckSupportOptimize() {
   support_optimize_ = true;
   matmul_func_ = MatMulInt8_16x4;
 #ifdef ENABLE_ARM64
-#if !defined(SUPPORT_NNIE) && !defined(MACHINE_LINUX_ARM64)
+#if !defined(SUPPORT_NNIE) && !defined(SUPPORT_34XX) && !defined(MACHINE_LINUX_ARM64)
   if (mindspore::lite::IsSupportSDot()) {
     support_optimize_ = true;
     matmul_func_ = MatMulR4Int8_optimize_handler;

@@ -125,7 +125,7 @@ void DeConvInt8CPUKernel::CheckSupportOptimize() {
 #endif
   support_optimize_ = false;
   matmul_func_ = MatMulR4Int8Neon64;
-#if !defined(SUPPORT_NNIE) && !defined(MACHINE_LINUX_ARM64)
+#if !defined(SUPPORT_NNIE) && !defined(SUPPORT_34XX) && !defined(MACHINE_LINUX_ARM64)
   }
 #endif
 #endif

@@ -33,7 +33,7 @@ int MatmulBaseInt8Run(void *cdata, int task_id, float, float) {
   return RET_OK;
 }
 
-#if defined(ENABLE_ARM64) && !defined(SUPPORT_NNIE) && (!defined(MACHINE_LINUX_ARM64))
+#if defined(ENABLE_ARM64) && !defined(SUPPORT_NNIE) && !defined(SUPPORT_34XX) && (!defined(MACHINE_LINUX_ARM64))
 int Arm64SdotPreRun(void *cdata, int task_id, float, float) {
   CHECK_NULL_RETURN(cdata);
   auto op = reinterpret_cast<MatmulBaseInt8CPUKernel *>(cdata);

@@ -456,7 +456,7 @@ int MatmulBaseInt8CPUKernel::ReSize() {
   return RET_OK;
 }
 
-#if defined(ENABLE_ARM64) && !defined(SUPPORT_NNIE) && (!defined(MACHINE_LINUX_ARM64))
+#if defined(ENABLE_ARM64) && !defined(SUPPORT_NNIE) && !defined(SUPPORT_34XX) && (!defined(MACHINE_LINUX_ARM64))
 int MatmulBaseInt8CPUKernel::RunArm64Sdot() {
   int8_t *a_ptr = reinterpret_cast<int8_t *>(in_tensors_.at(0)->data());
   int8_t *b_ptr = reinterpret_cast<int8_t *>(in_tensors_.at(1)->data());

@@ -489,7 +489,7 @@ int MatmulBaseInt8CPUKernel::RunArm64Sdot() {
 #endif
 
 int MatmulBaseInt8CPUKernel::Run() {
-#if defined(ENABLE_ARM64) && !defined(SUPPORT_NNIE) && (!defined(MACHINE_LINUX_ARM64))
+#if defined(ENABLE_ARM64) && !defined(SUPPORT_NNIE) && !defined(SUPPORT_34XX) && (!defined(MACHINE_LINUX_ARM64))
   if (support_sdot_) {
     return RunArm64Sdot();
   }

@@ -128,7 +128,7 @@ int MatMulDynamicSdotInt8Kernel::MatMulDynamicArm64SdotImpl(int task_id) {
     bias += col_offset;
   }
 
-#if defined(ENABLE_ARM64) && !defined(SUPPORT_NNIE) && (!defined(MACHINE_LINUX_ARM64))
+#if defined(ENABLE_ARM64) && !defined(SUPPORT_NNIE) && !defined(SUPPORT_34XX) && (!defined(MACHINE_LINUX_ARM64))
   DynamicMatmulSdot4x4x16AIWI(a_ptr, b_ptr, out_ptr, param_->deep_align_, multi_scale.data() + c, bias, row, col,
                               out_stride, input_sums_ptr, weight_sums_ptr, quant_param_->input_zp_,
                               quant_param_->filter_zp_[0] * param_->deep_);

@@ -22,7 +22,7 @@ if(TARGET_HIMIX)
         link_directories(${PROVIDERS_DIR}/nnie/third_patry/hi3559_sdk/lib)
         add_compile_definitions(BENCHMARK_CLIP_JSON)
     endif()
-elseif(TARGET_MIX210)
+elseif(TARGET_MIX210 AND (NOT MSLITE_ENABLE_ACL))
     set(CMAKE_SKIP_BUILD_RPATH on)
     set(BENCHMARK_LINK_LIB ${BENCHMARK_LINK_LIB} mindspore::json pthread
             dpico_acl_adapter stdc++)

@@ -30,18 +30,18 @@ STATUS Conv2DFusionMapper::Mapper(const CNodePtr &cnode) {
     MS_LOG(ERROR) << "Get primitive from cnode failed.";
     return lite::RET_ERROR;
   }
+  ops::Conv2D conv2d_op;
+  PrimitivePtr dst_prim = conv2d_op.GetPrim();
+#ifndef SUPPORT_SD3403_DAVINCI
   bool is_depth_wise = false;
   auto depth_wise_ptr = src_prim->GetAttr(ops::kIsDepthWise);
   if (depth_wise_ptr != nullptr) {
     is_depth_wise = GetValue<bool>(depth_wise_ptr);
   }
-  PrimitivePtr dst_prim = nullptr;
-  if (!is_depth_wise) {
-    ops::Conv2D conv2d_op;
-    dst_prim = conv2d_op.GetPrim();
-  } else {
+  if (is_depth_wise) {
     dst_prim = std::make_shared<acl::DepthwiseConv2dNative>();
   }
+#endif
   CHECK_NULL_RETURN(dst_prim);
   dst_prim->SetAttrs(src_prim->attrs());
   auto status = AttrAdjust(dst_prim, ops::kStride);

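Context note: with the hunk above, SD3403 DaVinci builds map every Conv2DFusion node to a plain Conv2D primitive and skip the depthwise check entirely; other builds still redirect depthwise convolutions to DepthwiseConv2dNative. A reduced sketch of that decision follows, with the MindSpore primitive API replaced by stubs; only the macro and the Conv2D/DepthwiseConv2dNative names come from the patch.

// Sketch: primitive selection for Conv2DFusion mapping after this patch.
#include <memory>
#include <string>

struct PrimitiveStub {
  std::string name;  // stands in for the real MindSpore primitive type
};
using PrimitivePtrStub = std::shared_ptr<PrimitiveStub>;

PrimitivePtrStub MapConv2DFusion(bool is_depth_wise) {
  // Default mapping: plain Conv2D; on SD3403 builds this is the only choice.
  PrimitivePtrStub dst_prim = std::make_shared<PrimitiveStub>(PrimitiveStub{"Conv2D"});
#ifndef SUPPORT_SD3403_DAVINCI
  // Other Ascend builds keep the dedicated depthwise primitive.
  if (is_depth_wise) {
    dst_prim = std::make_shared<PrimitiveStub>(PrimitiveStub{"DepthwiseConv2dNative"});
  }
#else
  (void)is_depth_wise;  // the depthwise flag is ignored on SD3403 DaVinci builds
#endif
  return dst_prim;
}
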
@@ -73,8 +73,13 @@ STATUS UpsampleMapper::AttrAdjust(const PrimitivePtr &src_prim, const ValueNodeP
   MS_LOG(DEBUG) << "The scale value: " << scale[1];
   auto dst_prim = std::make_shared<acl::Upsample>();
   CHECK_NULL_RETURN(dst_prim);
+#ifndef SUPPORT_SD3403_DAVINCI
   float attr_scale = 1;
   dst_prim->AddAttr("scale", MakeValue(attr_scale));
+#else
+  dst_prim->AddAttr("scale", MakeValue(scale[1]));
+#endif
+
   int64_t stride_h = static_cast<int64_t>(scale[1]);
   int64_t stride_w = stride_h;
   dst_prim->AddAttr("stride_h", MakeValue(stride_h));
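Context note: after the hunk above, generic Ascend builds still pin the Upsample "scale" attribute to 1 and carry the factor through the stride attributes, while SD3403 DaVinci builds write the scale value taken from the model. A small sketch of that difference, with the attribute API stubbed out; the stub names are illustrative and not part of the patch.

// Sketch: the "scale" attribute written by the Upsample mapper after this patch.
#include <iostream>

void AddAttrStub(const char *name, float value) {  // stands in for AddAttr/MakeValue
  std::cout << name << " = " << value << '\n';
}

void AdjustUpsampleScaleSketch(float model_scale) {
#ifndef SUPPORT_SD3403_DAVINCI
  // Generic Ascend builds: "scale" stays pinned to 1; the factor travels via the strides.
  AddAttrStub("scale", 1.0f);
#else
  // SD3403 DaVinci builds: the real scale value from the model is written directly.
  AddAttrStub("scale", model_scale);
#endif
}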