!25474 [MSLITE] enable runtime convert in x86 test

Merge pull request !25474 from ling/sr
This commit is contained in:
i-robot 2021-10-28 06:09:41 +00:00 committed by Gitee
commit cd2c7d0b9d
10 changed files with 23 additions and 13 deletions

View File

@ -30,7 +30,7 @@ option(MSLITE_ENABLE_FP16 "Whether to compile Fp16 operator" off)
option(MSLITE_ENABLE_ACL "enable ACL" off)
option(MSLITE_ENABLE_MODEL_ENCRYPTION "enable model encryption, only converter support" on)
option(MSLITE_ENABLE_SPARSE_COMPUTE "enable sparse kernel" off)
option(MSLITE_ENABLE_RUNTIME_CONVERT "enable runtime convert" off)
option(MSLITE_ENABLE_RUNTIME_CONVERT "enable runtime convert" on)
option(MSLITE_ENABLE_RUNTIME_GLOG "enable runtime glog" off)
#Option that can be configured through manually

View File

@ -39,6 +39,7 @@ write_commit_file() {
build_lite_x86_64_jni_and_jar() {
X86_JNI_CMAKE_ARGS=$1
export MSLITE_ENABLE_RUNTIME_CONVERT=off
# copy x86 so
local is_train=on
cd ${BASEPATH}/output/tmp

View File

@ -125,6 +125,7 @@ cp scripts/*.sh ${PACKAGE}/
# Copy the shared MindSpore ToD library
tar -xzf ${TARBALL}
mv mindspore-*/runtime/lib ${PACKAGE}/
mv mindspore-*/runtime/third_party/glog/* ${PACKAGE}/lib/
mv mindspore-*/runtime/third_party/libjpeg-turbo/lib/* ${PACKAGE}/lib/
cd mindspore-*
if [[ "${TARGET}" == "arm64" ]] && [[ -d "runtime/third_party/hiai_ddk/lib" ]]; then

View File

@ -64,6 +64,7 @@ fi
tar xzvf ${BASEPATH}/build/${MINDSPORE_FILE} -C ${BASEPATH}/build/${MINDSPORE_FILE_NAME} --strip-components=1
cp -r ${BASEPATH}/build/${MINDSPORE_FILE_NAME}/runtime/lib/* ${BASEPATH}/lib
cp -r ${BASEPATH}/build/${MINDSPORE_FILE_NAME}/runtime/third_party/glog/* ${BASEPATH}/lib
cp ${BASEPATH}/build/${MINDSPORE_FILE_NAME}/runtime/third_party/libjpeg-turbo/lib/libjpeg.so.62 ${BASEPATH}/lib
cd ${BASEPATH}/ || exit

View File

@ -114,6 +114,7 @@ cp scripts/*.sh ${PACKAGE}/
# Copy the shared MindSpore ToD library
tar -xzf ${TARBALL}
mv mindspore-*/runtime/lib ${PACKAGE}/
mv mindspore-*/runtime/third_party/glog/* ${PACKAGE}/lib/
mv mindspore-*/runtime/third_party/libjpeg-turbo/lib/* ${PACKAGE}/lib/
cd mindspore-*
if [[ "${TARGET}" == "arm64" ]] && [[ -d "runtime/third_party/hiai_ddk/lib" ]]; then

View File

@ -12,6 +12,10 @@ if(MSLITE_ENABLE_V0)
add_definitions(-DENABLE_V0)
endif()
# Propagate the runtime-convert build option into the test binary as a
# preprocessor definition, so tests guarded by #ifdef RUNTIME_CONVERT
# (e.g. BenchmarkTest.runtimeConvert1) are only compiled when enabled.
if(MSLITE_ENABLE_RUNTIME_CONVERT)
add_definitions(-DRUNTIME_CONVERT)
endif()
file(GLOB_RECURSE TEST_UT_SRC
${TEST_DIR}/main.cc
${TEST_DIR}/common/common_test.cc

View File

@ -102,6 +102,7 @@ echo 'runtime pass'
echo 'runtime convert'
./lite-test --gtest_filter="RuntimeConvert.*"
./lite-test --gtest_filter="BenchmarkTest.runtimeConvert1"
echo 'Optimize Allocator'
./lite-test --gtest_filter="OptAllocator.*"

View File

@ -108,5 +108,13 @@ TEST_F(BenchmarkTest, mindrtParallelOffline3) {
int status = mindspore::lite::RunBenchmark(6, benchmark_argv3);
ASSERT_EQ(status, lite::RET_OK);
}
#ifdef RUNTIME_CONVERT
// Verify the benchmark tool can run a MindIR model end to end, which
// exercises the runtime-convert path (model is converted at load time).
TEST_F(BenchmarkTest, runtimeConvert1) {
  // argv-style argument list handed to the benchmark entry point.
  const char *args[] = {"./benchmark", "--modelFile=./relu.mindir", "--loopCount=1", "--warmUpLoopCount=0"};
  constexpr int kArgc = 4;
  ASSERT_EQ(mindspore::lite::RunBenchmark(kArgc, args), lite::RET_OK);
}
#endif
} // namespace lite
} // namespace mindspore

View File

@ -39,7 +39,8 @@ function Run_Tensorrt() {
chmod +x ./tools/benchmark/benchmark
# copy related files to benchmark_test
cp -a ./tools/benchmark/benchmark ${benchmark_test_path}/benchmark || exit 1
cp -a ./runtime/lib/libmindspore-lite.so ${benchmark_test_path}/libmindspore-lite.so || exit 1
cp -a ./runtime/lib/lib*.so* ${benchmark_test_path}/ || exit 1
cp -a ./runtime/third_party/glog/libglog.so.0 ${benchmark_test_path}/libglog.so.0 || exit 1
echo "start push files to nvidia device ${device_ip} : ${cuda_device_id}"
ssh tensorrt@${device_ip} "cd ${device_benchmark_test_path}; rm -rf ./*"

View File

@ -446,15 +446,8 @@ int BenchmarkUnifiedApi::RunBenchmark() {
// Load graph
std::string model_name = flags_->model_file_.substr(flags_->model_file_.find_last_of(DELIM_SLASH) + 1);
MS_LOG(INFO) << "start reading model file";
std::cout << "start reading model file" << std::endl;
size_t size = 0;
char *graph_buf = ReadFile(flags_->model_file_.c_str(), &size);
if (graph_buf == nullptr) {
MS_LOG(ERROR) << "Read model file failed while running " << model_name.c_str();
std::cerr << "Read model file failed while running " << model_name.c_str() << std::endl;
return RET_ERROR;
}
MS_LOG(INFO) << "start unified benchmark run";
std::cout << "start unified benchmark run" << std::endl;
auto context = std::make_shared<mindspore::Context>();
if (context == nullptr) {
@ -473,8 +466,7 @@ int BenchmarkUnifiedApi::RunBenchmark() {
}
}
auto ret = ms_model_.Build(graph_buf, size, kMindIR, context);
delete[] graph_buf;
auto ret = ms_model_.Build(flags_->model_file_, kMindIR, context);
if (ret != kSuccess) {
MS_LOG(ERROR) << "ms_model_.Build failed while running ", model_name.c_str();
std::cout << "ms_model_.Build failed while running ", model_name.c_str();