add build option to support link tensor-rt

This commit is contained in:
wilfChen 2021-04-25 15:39:28 +08:00
parent 6801ef61e0
commit f47a6a619b
2 changed files with 20 additions and 4 deletions

View File

@@ -27,6 +27,7 @@ usage()
echo " [-P on|off] [-z [on|off]] [-M on|off] [-V 9.2|10.1|310|910] [-I arm64|arm32|x86_64] [-K] \\"
echo " [-B on|off] [-E] [-l on|off] [-n full|lite|off] [-T on|off] [-H on|off] \\"
echo " [-A [cpp|java|object-c] [-C on|off] [-o on|off] [-S on|off] [-k on|off] [-W sse|neon|avx|off] \\"
echo " [-L Tensor-RT path] \\"
echo ""
echo "Options:"
echo " -d Debug mode"
@@ -64,6 +65,7 @@ usage()
echo " -k Enable make clean, clean up compilation generated cache "
echo " -W Enable x86_64 SSE or AVX instruction set, use [sse|avx|neon|off], default off"
echo " -H Enable hidden"
echo " -L Link and specify Tensor-RT library path, default disable Tensor-RT lib linking"
}
# check value of input is 'on' or 'off'
@@ -122,9 +124,11 @@ checkopts()
ENABLE_NPU="off"
ENABLE_HIDDEN="on"
LITE_ENABLE_GPU=""
TENSORRT_HOME=""
# Process the options
-while getopts 'drvj:c:t:hsb:a:g:p:ie:m:l:I:RP:D:zM:V:K:B:En:T:A:C:o:S:k:W:H:' opt
+while getopts 'drvj:c:t:hsb:a:g:p:ie:m:l:I:RP:D:zM:V:K:B:En:T:A:C:o:S:k:W:H:L:' opt
do
CASE_SENSIVE_ARG=${OPTARG}
OPTARG=$(echo ${OPTARG} | tr '[A-Z]' '[a-z]')
case "${opt}" in
d)
@@ -333,6 +337,11 @@ checkopts()
ENABLE_HIDDEN="$OPTARG"
echo "${OPTARG} hidden"
;;
L)
ENABLE_TRT="on"
TENSORRT_HOME="$CASE_SENSIVE_ARG"
echo "Link Tensor-RT library. Path: ${CASE_SENSIVE_ARG}"
;;
*)
echo "Unknown option ${opt}!"
usage
@@ -489,6 +498,9 @@ build_mindspore()
if [[ "X$ENABLE_HIDDEN" = "Xoff" ]]; then
CMAKE_ARGS="${CMAKE_ARGS} -DENABLE_HIDDEN=OFF"
fi
if [[ "X$ENABLE_TRT" == "Xon" ]]; then
CMAKE_ARGS="${CMAKE_ARGS} -DTENSORRT_HOME=${TENSORRT_HOME}"
fi
echo "${CMAKE_ARGS}"
if [[ "X$INC_BUILD" = "Xoff" ]]; then
cmake ${CMAKE_ARGS} ../..

View File

@@ -117,11 +117,15 @@ if(ENABLE_GPU)
"runtime/device/gpu/trt_loader.cc"
)
-if(DEFINED ENV{TENSORRT_HOME} AND NOT $ENV{TENSORRT_HOME} STREQUAL "")
+if(NOT ${TENSORRT_HOME} STREQUAL "")
-message("Enable GPU inference. Tensor-RT dir: $ENV{TENSORRT_HOME}")
+find_path(TENSORRT_HOME_INCLUDE NvInfer.h HINTS ${TENSORRT_HOME}/include)
if(TENSORRT_HOME_INCLUDE STREQUAL TENSORRT_HOME_INCLUDE-NOTFOUND)
message(FATAL_ERROR "Tensor-RT dir not exist ${TENSORRT_HOME}")
endif()
message("Enable GPU inference. Tensor-RT include dir: ${TENSORRT_HOME_INCLUDE}")
set(ENABLE_GPU_INFER TRUE)
add_compile_definitions(ENABLE_GPU_INFER)
-include_directories($ENV{TENSORRT_HOME}/include)
+include_directories(${TENSORRT_HOME_INCLUDE})
list(APPEND GPU_SRC_LIST ${CMAKE_CURRENT_SOURCE_DIR}/runtime/device/gpu/trt_loader.cc)
endif()