From a7e625245e219ff265ae6aea9772cdecd177e2bb Mon Sep 17 00:00:00 2001
From: yefeng
Date: Thu, 28 Apr 2022 10:01:43 +0800
Subject: [PATCH] waiting tasks in the queue exceeds the limit

---
 include/api/status.h                                      | 1 +
 .../lite/src/cxx_api/model_pool/model_parallel_runner.cc  | 2 +-
 mindspore/lite/src/cxx_api/model_pool/model_pool.cc       | 6 ++++++
 3 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/include/api/status.h b/include/api/status.h
index d01a40755ca..f86c322e469 100644
--- a/include/api/status.h
+++ b/include/api/status.h
@@ -80,6 +80,7 @@ enum StatusCode : uint32_t {
   kLiteThreadPoolError = kLite | (0x0FFFFFFF & -8),    /**< Error occur in thread pool. */
   kLiteUninitializedObj = kLite | (0x0FFFFFFF & -9),   /**< Object is not initialized. */
   kLiteFileError = kLite | (0x0FFFFFFF & -10),         /**< Invalid file. */
+  kLiteServiceDeny = kLite | (0x0FFFFFFF & -11),       /**< Denial of service. */
 
   // Executor error code, range: [-100,-200)
   kLiteOutOfTensorRange = kLite | (0x0FFFFFFF & -100), /**< Failed to check range. */
diff --git a/mindspore/lite/src/cxx_api/model_pool/model_parallel_runner.cc b/mindspore/lite/src/cxx_api/model_pool/model_parallel_runner.cc
index dfaba1e0d2e..2b124d3c8ab 100644
--- a/mindspore/lite/src/cxx_api/model_pool/model_parallel_runner.cc
+++ b/mindspore/lite/src/cxx_api/model_pool/model_parallel_runner.cc
@@ -57,7 +57,7 @@ Status ModelParallelRunner::Predict(const std::vector<MSTensor> &inputs, std::ve
   auto status = model_pool_->Predict(inputs, outputs, before, after);
   if (status != kSuccess) {
     MS_LOG(ERROR) << "model runner predict failed.";
-    return kLiteError;
+    return status;
   }
   return kSuccess;
 }
diff --git a/mindspore/lite/src/cxx_api/model_pool/model_pool.cc b/mindspore/lite/src/cxx_api/model_pool/model_pool.cc
index 3ff7335114b..44beb306885 100644
--- a/mindspore/lite/src/cxx_api/model_pool/model_pool.cc
+++ b/mindspore/lite/src/cxx_api/model_pool/model_pool.cc
@@ -29,6 +29,7 @@ namespace mindspore {
 namespace {
 constexpr int32_t kNumThreads = 8;
 constexpr int kNumDeviceInfo = 2;
+constexpr int kNumMaxTaskQueueSize = 1000;
 int GetCoreNum() {
   int core_num = 1;
 #if defined(_MSC_VER) || defined(_WIN32)
@@ -639,6 +640,11 @@ Status ModelPool::Predict(const std::vector<MSTensor> &inputs, std::vector
+  if (predict_task_queue_->GetTaskNum(max_wait_worker_node_id) > kNumMaxTaskQueueSize) {
+    MS_LOG(ERROR) << "The number of waiting tasks in the queue exceeds the limit, ret=" << kLiteServiceDeny;
+    predict_task_mutex_.unlock();
+    return kLiteServiceDeny;
+  }
   predict_task_queue_->DecreaseWaitModelNum(1, max_wait_worker_node_id);
   auto predict_task = std::make_shared<PredictTask>(&inputs, outputs, before, after);
   if (predict_task == nullptr) {
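
Note (not part of the patch): the sketch below shows one way a caller of
ModelParallelRunner::Predict could treat the new kLiteServiceDeny return code
as a transient "queue full" signal and retry after a short backoff. It relies
only on the public Status/MSTensor/ModelParallelRunner API referenced in the
diff above; the helper name PredictWithBackoff, the retry count, the backoff
interval, and the exact include paths are illustrative assumptions, not part
of MindSpore.

#include <chrono>
#include <thread>
#include <vector>

#include "include/api/model_parallel_runner.h"  // assumed header path; may differ per package
#include "include/api/status.h"

// Retry wrapper: forwards to ModelParallelRunner::Predict and, when the pool
// rejects the request because its task queue is over the limit, waits briefly
// and tries again. Any other status (success or error) is returned unchanged.
mindspore::Status PredictWithBackoff(mindspore::ModelParallelRunner *runner,
                                     const std::vector<mindspore::MSTensor> &inputs,
                                     std::vector<mindspore::MSTensor> *outputs) {
  constexpr int kMaxRetries = 3;                            // arbitrary for the example
  constexpr auto kBackoff = std::chrono::milliseconds(50);  // arbitrary for the example
  for (int attempt = 0; attempt <= kMaxRetries; ++attempt) {
    auto status = runner->Predict(inputs, outputs, nullptr, nullptr);  // no before/after callbacks
    if (status != mindspore::kLiteServiceDeny) {
      return status;  // success, or an error unrelated to overload
    }
    // More than kNumMaxTaskQueueSize tasks were already waiting; back off before resubmitting.
    std::this_thread::sleep_for(kBackoff);
  }
  return mindspore::kLiteServiceDeny;  // still overloaded after all retries
}

Because the new check in ModelPool::Predict returns before a PredictTask is
created or the wait-model counter is decreased, a rejected call leaves nothing
queued, so retrying later is safe.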