diff --git a/mindspore/lite/src/extendrt/delegate/factory.cc b/mindspore/lite/src/extendrt/delegate/factory.cc
index 516e2e57166..5a50d4d5636 100644
--- a/mindspore/lite/src/extendrt/delegate/factory.cc
+++ b/mindspore/lite/src/extendrt/delegate/factory.cc
@@ -22,10 +22,10 @@ DelegateRegistry &DelegateRegistry::GetInstance() {
 }
 
 void DelegateRegistry::RegDelegate(const mindspore::DeviceType &device_type, const std::string &provider,
-                                   DelegateCreator creator) {
+                                   DelegateCreator *creator) {
   auto it = creator_map_.find(device_type);
   if (it == creator_map_.end()) {
-    HashMap<std::string, DelegateCreator> map;
+    HashMap<std::string, DelegateCreator *> map;
     map[provider] = creator;
     creator_map_[device_type] = map;
     return;
@@ -53,6 +53,6 @@ std::shared_ptr<GraphExecutor> DelegateRegistry::GetDelegate(const mindspore::De
   if (creator_it == it->second.end()) {
     return nullptr;
   }
-  return creator_it->second(ctx, config_infos);
+  return (*(creator_it->second))(ctx, config_infos);
 }
 }  // namespace mindspore
diff --git a/mindspore/lite/src/extendrt/delegate/factory.h b/mindspore/lite/src/extendrt/delegate/factory.h
index 3ae108166ad..417ea0f7f9c 100644
--- a/mindspore/lite/src/extendrt/delegate/factory.h
+++ b/mindspore/lite/src/extendrt/delegate/factory.h
@@ -40,25 +40,28 @@ class MS_API DelegateRegistry {
 
   static DelegateRegistry &GetInstance();
 
-  void RegDelegate(const mindspore::DeviceType &device_type, const std::string &provider, DelegateCreator creator);
+  void RegDelegate(const mindspore::DeviceType &device_type, const std::string &provider, DelegateCreator *creator);
   void UnRegDelegate(const mindspore::DeviceType &device_type, const std::string &provider);
   std::shared_ptr<GraphExecutor> GetDelegate(const mindspore::DeviceType &device_type, const std::string &provider,
                                              const std::shared_ptr<Context> &ctx, const ConfigInfos &config_infos);
 
  private:
-  mindspore::HashMap<DeviceType, HashMap<std::string, DelegateCreator>> creator_map_;
+  mindspore::HashMap<DeviceType, HashMap<std::string, DelegateCreator *>> creator_map_;
 };
 
 class DelegateRegistrar {
  public:
-  DelegateRegistrar(const mindspore::DeviceType &device_type, const std::string &provider, DelegateCreator creator) {
+  DelegateRegistrar(const mindspore::DeviceType &device_type, const std::string &provider, DelegateCreator *creator) {
     DelegateRegistry::GetInstance().RegDelegate(device_type, provider, creator);
   }
   ~DelegateRegistrar() = default;
 };
 
-#define REG_DELEGATE(device_type, provider, creator) \
-  static DelegateRegistrar g_##device_type##provider##Delegate(device_type, provider, creator);
+#define REG_DELEGATE(device_type, provider, creator)                                                             \
+  static DelegateCreator func = [=](const std::shared_ptr<Context> &context, const ConfigInfos &config_infos) {  \
+    return creator(context, config_infos);                                                                       \
+  };                                                                                                              \
+  static DelegateRegistrar g_##device_type##provider##Delegate(device_type, provider, &func);
 }  // namespace mindspore
 
 #endif  // MINDSPORE_LITE_SRC_EXTENDRT_DELEGATE_FACTORY_H_
diff --git a/mindspore/lite/test/config_level0/models_python_gpu.cfg b/mindspore/lite/test/config_level0/models_python_gpu.cfg
new file mode 100644
index 00000000000..0f8986bd54d
--- /dev/null
+++ b/mindspore/lite/test/config_level0/models_python_gpu.cfg
@@ -0,0 +1 @@
+yolox.onnx;1:input;1,3,640,640;;
diff --git a/mindspore/lite/test/st/scripts/run_benchmark_python.sh b/mindspore/lite/test/st/scripts/run_benchmark_python.sh
index 961a8ec74df..67eefe36307 100644
--- a/mindspore/lite/test/st/scripts/run_benchmark_python.sh
+++ b/mindspore/lite/test/st/scripts/run_benchmark_python.sh
@@ -10,7 +10,7 @@ function Run_python_ST() {
     backend=$6
     mindspore_lite_whl=`ls ${whl_path}/*.whl`
    if [[ -f "${mindspore_lite_whl}" ]]; then
-        pip install ${mindspore_lite_whl} --force-reinstall || exit 1
+        pip install ${mindspore_lite_whl} --force-reinstall --user || exit 1
         echo "install python whl success."
     else
         echo "not find python whl.."
diff --git a/mindspore/lite/test/st/scripts/run_benchmark_server_inference_tensorrt_cloud.sh b/mindspore/lite/test/st/scripts/run_benchmark_server_inference_tensorrt_cloud.sh
index 60b0f3b49fe..75053792e68 100644
--- a/mindspore/lite/test/st/scripts/run_benchmark_server_inference_tensorrt_cloud.sh
+++ b/mindspore/lite/test/st/scripts/run_benchmark_server_inference_tensorrt_cloud.sh
@@ -1,5 +1,6 @@
 #!/bin/bash
 source ./scripts/base_functions.sh
+source ./scripts/run_benchmark_python.sh
 
 # Run converter on x86 platform:
 function Run_Converter() {
@@ -250,5 +251,18 @@ if [[ $backend == "all" || $backend == "server_inference_x86_cloud_gpu" ]]; then
 fi
 
 Print_Benchmark_Result ${run_benchmark_result_file}
+
+# run python ST
+if [[ $backend == "all" || $backend == "server_inference_x86_cloud_gpu" ]]; then
+    models_python_config=${basepath}/../config_level0/models_python_gpu.cfg
+    models_python_cfg_file_list=("$models_python_config")
+    Run_python_ST ${basepath} ${x86_path} ${ms_models_path} ${models_path} "${models_python_cfg_file_list[*]}" "GPU"
+    Run_python_status=$?
+    if [[ ${Run_python_status} != 0 ]];then
+        echo "Run_python_status failed"
+        isFailed=1
+    fi
+fi
+
 echo "run x86_gpu_server_inference is ended"
 exit ${isFailed}
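For context on the factory change above, here is a minimal, self-contained sketch of the registration pattern the patch moves to. The types below (`Context`, `ConfigInfos`, `Delegate`, `GpuDelegate`) and the single-level provider map are simplified stand-ins for illustration, not the real MindSpore classes. The point it demonstrates: the registry now stores `DelegateCreator *` and dereferences the pointer at lookup time, while each registrant keeps a `static DelegateCreator` object whose address it hands in, mirroring the rewritten `REG_DELEGATE` macro.

```cpp
#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <string>

// Simplified stand-ins for the MindSpore types referenced in the diff.
struct Context {};
struct ConfigInfos {};
struct Delegate {
  virtual ~Delegate() = default;
};

using DelegateCreator =
    std::function<std::shared_ptr<Delegate>(const std::shared_ptr<Context> &, const ConfigInfos &)>;

class DelegateRegistry {
 public:
  static DelegateRegistry &GetInstance() {
    static DelegateRegistry instance;
    return instance;
  }

  // As in the patch: the registry stores a pointer to the creator, not a copy.
  void RegDelegate(const std::string &provider, DelegateCreator *creator) { creator_map_[provider] = creator; }

  std::shared_ptr<Delegate> GetDelegate(const std::string &provider, const std::shared_ptr<Context> &ctx,
                                        const ConfigInfos &config_infos) {
    auto it = creator_map_.find(provider);
    if (it == creator_map_.end()) {
      return nullptr;
    }
    // Dereference the stored pointer, then invoke the wrapped std::function.
    return (*(it->second))(ctx, config_infos);
  }

 private:
  std::map<std::string, DelegateCreator *> creator_map_;
};

struct GpuDelegate : Delegate {};

// Mirrors the rewritten REG_DELEGATE macro: a static DelegateCreator wraps the
// user-supplied creator, and its address is what gets registered.
static DelegateCreator g_gpu_creator = [](const std::shared_ptr<Context> &, const ConfigInfos &) {
  return std::make_shared<GpuDelegate>();
};

int main() {
  DelegateRegistry::GetInstance().RegDelegate("gpu", &g_gpu_creator);
  auto delegate = DelegateRegistry::GetInstance().GetDelegate("gpu", std::make_shared<Context>(), ConfigInfos{});
  std::cout << (delegate != nullptr ? "created" : "missing") << std::endl;
  return 0;
}
```

The effect of this shape is that exactly one creator object lives in the registrant's translation unit, and the registry only ever passes its address around instead of copying a `std::function` across the registration boundary.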