From 6512f678d5a89e2112aee4e81e8cef03584d21d6 Mon Sep 17 00:00:00 2001 From: gongdaguo Date: Fri, 18 Sep 2020 17:12:13 +0800 Subject: [PATCH] Fix security error and change download link --- .../lite/test/models_tflite_awaretraining.cfg | 2 + .../lite/image_classification/README.en.md | 56 +-- .../lite/image_classification/README.md | 32 +- .../lite/object_detection/README.en.md | 336 +++++++++--------- .../official/lite/object_detection/README.md | 14 +- .../app/src/main/cpp/ssd_util/ssd_util.cpp | 4 +- .../app/src/main/cpp/ssd_util/ssd_util.h | 10 +- 7 files changed, 228 insertions(+), 226 deletions(-) diff --git a/mindspore/lite/test/models_tflite_awaretraining.cfg b/mindspore/lite/test/models_tflite_awaretraining.cfg index b1becf6f52..5fea713ea6 100644 --- a/mindspore/lite/test/models_tflite_awaretraining.cfg +++ b/mindspore/lite/test/models_tflite_awaretraining.cfg @@ -25,6 +25,8 @@ graph_8bit_1021_combine.tflite lite-model_aiy_vision_classifier_insects_V1_3.tflite lite-model_aiy_vision_classifier_plants_V1_3.tflite lite-model_object_detection_mobile_object_labeler_v1_1.tflite +lite-model_cropnet_classifier_cassava_disease_V1_1.tflite +vision_classifier_fungi_mobile_V1_1_default_1.tflite detect.tflite ssd_mobilenet_v1_1_default_1.tflite object_detection_mobile_object_localizer_v1_1_default_1.tflite diff --git a/model_zoo/official/lite/image_classification/README.en.md b/model_zoo/official/lite/image_classification/README.en.md index 8dc1605f9d..2a53dc98b0 100644 --- a/model_zoo/official/lite/image_classification/README.en.md +++ b/model_zoo/official/lite/image_classification/README.en.md @@ -126,11 +126,11 @@ target_link_libraries( ) ``` -* In this example, the download.gradle File configuration auto download MindSpore Lite version, placed in the 'app / src / main/cpp/mindspore_lite_x.x.x-minddata-arm64-cpu' directory. +* In this example, the download.gradle File configuration auto download MindSpore Lite version, placed in the 'app/src/main/cpp/' directory. Note: if the automatic download fails, please manually download the relevant library files and put them in the corresponding location. - MindSpore Lite version [MindSpore Lite version]( https://download.mindspore.cn/model_zoo/official/lite/lib/mindspore%20version%200.7/libmindspore-lite.so) + mindspore-lite-1.0.0-minddata-arm64-cpu.tar.gz [Download link](https://download.mindspore.cn/model_zoo/official/lite/lib/mindspore%20version%201.0/mindspore-lite-1.0.0-minddata-arm64-cpu.tar.gz) ### Downloading and Deploying a Model File @@ -189,32 +189,32 @@ The inference code process is as follows. For details about the complete code, s Convert the image data to be detected into the Tensor format of the MindSpore model. - ```cpp - // Convert the Bitmap image passed in from the JAVA layer to Mat for OpenCV processing - BitmapToMat(env, srcBitmap, matImageSrc); - // Processing such as zooming the picture size. - matImgPreprocessed = PreProcessImageData(matImageSrc); - - ImgDims inputDims; - inputDims.channel = matImgPreprocessed.channels(); - inputDims.width = matImgPreprocessed.cols; - inputDims.height = matImgPreprocessed.rows; - float *dataHWC = new float[inputDims.channel * inputDims.width * inputDims.height] - - // Copy the image data to be detected to the dataHWC array. - // The dataHWC[image_size] array here is the intermediate variable of the input MindSpore model tensor. 
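    // Illustrative note: the element-for-element copy below keeps OpenCV's interleaved
    // HWC layout, so the value of channel c for the pixel at (row, col) ends up at
    // dataHWC[(row * inputDims.width + col) * inputDims.channel + c].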
- float *ptrTmp = reinterpret_cast(matImgPreprocessed.data); - for(int i = 0; i < inputDims.channel * inputDims.width * inputDims.height; i++){ - dataHWC[i] = ptrTmp[i]; - } - - // Assign dataHWC[image_size] to the input tensor variable. - auto msInputs = mSession->GetInputs(); - auto inTensor = msInputs.front(); - memcpy(inTensor->MutableData(), dataHWC, - inputDims.channel * inputDims.width * inputDims.height * sizeof(float)); - delete[] (dataHWC); - ``` + ```cpp + // Convert the Bitmap image passed in from the JAVA layer to Mat for OpenCV processing + BitmapToMat(env, srcBitmap, matImageSrc); + // Processing such as zooming the picture size. + matImgPreprocessed = PreProcessImageData(matImageSrc); + + ImgDims inputDims; + inputDims.channel = matImgPreprocessed.channels(); + inputDims.width = matImgPreprocessed.cols; + inputDims.height = matImgPreprocessed.rows; + float *dataHWC = new float[inputDims.channel * inputDims.width * inputDims.height] + + // Copy the image data to be detected to the dataHWC array. + // The dataHWC[image_size] array here is the intermediate variable of the input MindSpore model tensor. + float *ptrTmp = reinterpret_cast(matImgPreprocessed.data); + for(int i = 0; i < inputDims.channel * inputDims.width * inputDims.height; i++){ + dataHWC[i] = ptrTmp[i]; + } + + // Assign dataHWC[image_size] to the input tensor variable. + auto msInputs = mSession->GetInputs(); + auto inTensor = msInputs.front(); + memcpy(inTensor->MutableData(), dataHWC, + inputDims.channel * inputDims.width * inputDims.height * sizeof(float)); + delete[] (dataHWC); + ``` 3. Perform inference on the input tensor based on the model, obtain the output tensor, and perform post-processing. diff --git a/model_zoo/official/lite/image_classification/README.md b/model_zoo/official/lite/image_classification/README.md index e33c67b5f2..6c92ebdad6 100644 --- a/model_zoo/official/lite/image_classification/README.md +++ b/model_zoo/official/lite/image_classification/README.md @@ -86,13 +86,13 @@ app ### 配置MindSpore Lite依赖项 -Android JNI层调用MindSpore C++ API时,需要相关库文件支持。可通过MindSpore Lite源码编译生成`libmindspore-lite.so`库文件。 +Android JNI层调用MindSpore C++ API时,需要相关库文件支持。可通过MindSpore Lite[源码编译](https://www.mindspore.cn/lite/tutorial/zh-CN/master/build.html)生成"mindspore-lite-X.X.X-mindata-armXX-cpu"库文件包(包含`libmindspore-lite.so`库文件和相关头文件,可包含多个兼容架构)。 -本示例中,build过程由download.gradle文件自动从华为服务器下载MindSpore Lite 版本文件,并放置在`app / src / main/cpp/mindspore_lite_x.x.x-minddata-arm64-cpu`目录下。 +本示例中,build过程由download.gradle文件自动从华为服务器下载MindSpore Lite 版本文件,并放置在`app / src / main/cpp/`目录下。 * 注:若自动下载失败,请手动下载相关库文件并将其放在对应位置: - MindSpore Lite版本 [下载链接](https://download.mindspore.cn/model_zoo/official/lite/lib/mindspore%20version%200.7/libmindspore-lite.so) + mindspore-lite-1.0.0-minddata-arm64-cpu.tar.gz [下载链接](https://download.mindspore.cn/model_zoo/official/lite/lib/mindspore%20version%201.0/mindspore-lite-1.0.0-minddata-arm64-cpu.tar.gz) ``` @@ -243,20 +243,22 @@ target_link_libraries( - 输出数据的后续处理。 ```cpp - std::string ProcessRunnetResult(std::unordered_map msOutputs, int runnetRet) { - - std::unordered_map::iterator iter; + std::string ProcessRunnetResult(const int RET_CATEGORY_SUM, const char *const labels_name_map[], + std::unordered_map msOutputs) { + // Get the branch of the model output. + // Use iterators to get map elements. + std::unordered_map::iterator iter; iter = msOutputs.begin(); - + // The mobilenetv2.ms model output just one branch. 
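    // For a model with several output branches, each output tensor can also be fetched
    // by name (as the object-detection sample does), e.g.
    //   auto tensor = mSession->GetOutputByTensorName(name);
    // mobilenetv2.ms has a single branch, so taking the first map entry is enough.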
- auto outputTensor = iter->second; + auto outputTensor = iter->second; + int tensorNum = outputTensor->ElementsNum(); MS_PRINT("Number of tensor elements:%d", tensorNum); - - // Get a pointer to the first score. + + // Get a pointer to the first score. float *temp_scores = static_cast(outputTensor->MutableData()); - + float scores[RET_CATEGORY_SUM]; for (int i = 0; i < RET_CATEGORY_SUM; ++i) { if (temp_scores[i] > 0.5) { @@ -264,7 +266,7 @@ target_link_libraries( } scores[i] = temp_scores[i]; } - + // Score for each category. // Converted to text information that needs to be displayed in the APP. std::string categoryScore = ""; @@ -276,5 +278,5 @@ target_link_libraries( categoryScore += ";"; } return categoryScore; - } - ``` + } + ``` diff --git a/model_zoo/official/lite/object_detection/README.en.md b/model_zoo/official/lite/object_detection/README.en.md index eae50bb03b..f127e3e686 100644 --- a/model_zoo/official/lite/object_detection/README.en.md +++ b/model_zoo/official/lite/object_detection/README.en.md @@ -88,7 +88,7 @@ In this example, the download.gradle File configuration auto download library f Note: if the automatic download fails, please manually download the relevant library files and put them in the corresponding location. -libmindspore-lite.so [libmindspore-lite.so]( https://download.mindspore.cn/model_zoo/official/lite/lib/mindspore%20version%200.7/libmindspore-lite.so) +mindspore-lite-1.0.0-minddata-arm64-cpu.tar.gz [Download link](https://download.mindspore.cn/model_zoo/official/lite/lib/mindspore%20version%201.0/mindspore-lite-1.0.0-minddata-arm64-cpu.tar.gz) @@ -365,188 +365,188 @@ The inference code process is as follows. For details about the complete code, s - For the targets whose probability is greater than the threshold value, the output rectangle box needs to be restored to the original size after the rectangular box is filtered by NMS algorithm. -```cpp -std::string SSDModelUtil::getDecodeResult(float *branchScores, float *branchBoxData) { - std::string result = ""; - NormalBox tmpBox[1917] = {0}; - float mScores[1917][81] = {0}; + ```cpp + std::string SSDModelUtil::getDecodeResult(float *branchScores, float *branchBoxData) { + std::string result = ""; + NormalBox tmpBox[1917] = {0}; + float mScores[1917][81] = {0}; - float outBuff[1917][7] = {0}; + float outBuff[1917][7] = {0}; - float scoreWithOneClass[1917] = {0}; - int outBoxNum = 0; - YXBoxes decodedBoxes[1917] = {0}; + float scoreWithOneClass[1917] = {0}; + int outBoxNum = 0; + YXBoxes decodedBoxes[1917] = {0}; - // Copy branch outputs box data to tmpBox. - for (int i = 0; i < 1917; ++i) { - tmpBox[i].y = branchBoxData[i * 4 + 0]; - tmpBox[i].x = branchBoxData[i * 4 + 1]; - tmpBox[i].h = branchBoxData[i * 4 + 2]; - tmpBox[i].w = branchBoxData[i * 4 + 3]; - } - - // Copy branch outputs score to mScores. - for (int i = 0; i < 1917; ++i) { - for (int j = 0; j < 81; ++j) { - mScores[i][j] = branchScores[i * 81 + j]; + // Copy branch outputs box data to tmpBox. + for (int i = 0; i < 1917; ++i) { + tmpBox[i].y = branchBoxData[i * 4 + 0]; + tmpBox[i].x = branchBoxData[i * 4 + 1]; + tmpBox[i].h = branchBoxData[i * 4 + 2]; + tmpBox[i].w = branchBoxData[i * 4 + 3]; } - } - // NMS processing. 
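    // ssd_boxes_decode() converts the (y, x, h, w) offsets into absolute
    // ymin/xmin/ymax/xmax corners using the default boxes; nonMaximumSuppression()
    // then, per class, keeps the highest-scoring box and drops every remaining box
    // whose IoU with it exceeds nms_threshold, repeating until no candidates are left.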
- ssd_boxes_decode(tmpBox, decodedBoxes); - // const float nms_threshold = 0.6; - const float nms_threshold = 0.3; - for (int i = 1; i < 81; i++) { - std::vector in_indexes; - for (int j = 0; j < 1917; j++) { - scoreWithOneClass[j] = mScores[j][i]; - // if (mScores[j][i] > 0.1) { - if (mScores[j][i] > g_thres_map[i]) { - in_indexes.push_back(j); + // Copy branch outputs score to mScores. + for (int i = 0; i < 1917; ++i) { + for (int j = 0; j < 81; ++j) { + mScores[i][j] = branchScores[i * 81 + j]; } } - if (in_indexes.size() == 0) { - continue; + + // NMS processing. + ssd_boxes_decode(tmpBox, decodedBoxes); + // const float nms_threshold = 0.6; + const float nms_threshold = 0.3; + for (int i = 1; i < 81; i++) { + std::vector in_indexes; + for (int j = 0; j < 1917; j++) { + scoreWithOneClass[j] = mScores[j][i]; + // if (mScores[j][i] > 0.1) { + if (mScores[j][i] > g_thres_map[i]) { + in_indexes.push_back(j); + } + } + if (in_indexes.size() == 0) { + continue; + } + + sort(in_indexes.begin(), in_indexes.end(), + [&](int a, int b) { return scoreWithOneClass[a] > scoreWithOneClass[b]; }); + std::vector out_indexes; + + nonMaximumSuppression(decodedBoxes, scoreWithOneClass, in_indexes, out_indexes, + nms_threshold); + for (int k = 0; k < out_indexes.size(); k++) { + outBuff[outBoxNum][0] = out_indexes[k]; //image id + outBuff[outBoxNum][1] = i; //labelid + outBuff[outBoxNum][2] = scoreWithOneClass[out_indexes[k]]; //scores + outBuff[outBoxNum][3] = + decodedBoxes[out_indexes[k]].xmin * inputImageWidth / 300; + outBuff[outBoxNum][4] = + decodedBoxes[out_indexes[k]].ymin * inputImageHeight / 300; + outBuff[outBoxNum][5] = + decodedBoxes[out_indexes[k]].xmax * inputImageWidth / 300; + outBuff[outBoxNum][6] = + decodedBoxes[out_indexes[k]].ymax * inputImageHeight / 300; + outBoxNum++; + } + } + MS_PRINT("outBoxNum %d", outBoxNum); + + for (int i = 0; i < outBoxNum; ++i) { + std::string tmpid_str = std::to_string(outBuff[i][0]); + result += tmpid_str; // image ID + result += "_"; + // tmpid_str = std::to_string(outBuff[i][1]); + MS_PRINT("label_classes i %d, outBuff %d",i, (int) outBuff[i][1]); + tmpid_str = label_classes[(int) outBuff[i][1]]; + result += tmpid_str; // label id + result += "_"; + tmpid_str = std::to_string(outBuff[i][2]); + result += tmpid_str; // scores + result += "_"; + tmpid_str = std::to_string(outBuff[i][3]); + result += tmpid_str; // xmin + result += "_"; + tmpid_str = std::to_string(outBuff[i][4]); + result += tmpid_str; // ymin + result += "_"; + tmpid_str = std::to_string(outBuff[i][5]); + result += tmpid_str; // xmax + result += "_"; + tmpid_str = std::to_string(outBuff[i][6]); + result += tmpid_str; // ymax + result += ";"; } - sort(in_indexes.begin(), in_indexes.end(), - [&](int a, int b) { return scoreWithOneClass[a] > scoreWithOneClass[b]; }); - std::vector out_indexes; + return result; + } + std::string SSDModelUtil::getDecodeResult(float *branchScores, float *branchBoxData) { + std::string result = ""; + NormalBox tmpBox[1917] = {0}; + float mScores[1917][81] = {0}; + float outBuff[1917][7] = {0}; + float scoreWithOneClass[1917] = {0}; + int outBoxNum = 0; + YXBoxes decodedBoxes[1917] = {0}; - nonMaximumSuppression(decodedBoxes, scoreWithOneClass, in_indexes, out_indexes, - nms_threshold); - for (int k = 0; k < out_indexes.size(); k++) { - outBuff[outBoxNum][0] = out_indexes[k]; //image id - outBuff[outBoxNum][1] = i; //labelid - outBuff[outBoxNum][2] = scoreWithOneClass[out_indexes[k]]; //scores - outBuff[outBoxNum][3] = - decodedBoxes[out_indexes[k]].xmin * 
inputImageWidth / 300; - outBuff[outBoxNum][4] = - decodedBoxes[out_indexes[k]].ymin * inputImageHeight / 300; - outBuff[outBoxNum][5] = - decodedBoxes[out_indexes[k]].xmax * inputImageWidth / 300; - outBuff[outBoxNum][6] = - decodedBoxes[out_indexes[k]].ymax * inputImageHeight / 300; - outBoxNum++; + // Copy branch outputs box data to tmpBox. + for (int i = 0; i < 1917; ++i) { + tmpBox[i].y = branchBoxData[i * 4 + 0]; + tmpBox[i].x = branchBoxData[i * 4 + 1]; + tmpBox[i].h = branchBoxData[i * 4 + 2]; + tmpBox[i].w = branchBoxData[i * 4 + 3]; } - } - MS_PRINT("outBoxNum %d", outBoxNum); - for (int i = 0; i < outBoxNum; ++i) { - std::string tmpid_str = std::to_string(outBuff[i][0]); - result += tmpid_str; // image ID - result += "_"; - // tmpid_str = std::to_string(outBuff[i][1]); - MS_PRINT("label_classes i %d, outBuff %d",i, (int) outBuff[i][1]); - tmpid_str = label_classes[(int) outBuff[i][1]]; - result += tmpid_str; // label id - result += "_"; - tmpid_str = std::to_string(outBuff[i][2]); - result += tmpid_str; // scores - result += "_"; - tmpid_str = std::to_string(outBuff[i][3]); - result += tmpid_str; // xmin - result += "_"; - tmpid_str = std::to_string(outBuff[i][4]); - result += tmpid_str; // ymin - result += "_"; - tmpid_str = std::to_string(outBuff[i][5]); - result += tmpid_str; // xmax - result += "_"; - tmpid_str = std::to_string(outBuff[i][6]); - result += tmpid_str; // ymax - result += ";"; - } - - return result; -} -std::string SSDModelUtil::getDecodeResult(float *branchScores, float *branchBoxData) { - std::string result = ""; - NormalBox tmpBox[1917] = {0}; - float mScores[1917][81] = {0}; - float outBuff[1917][7] = {0}; - float scoreWithOneClass[1917] = {0}; - int outBoxNum = 0; - YXBoxes decodedBoxes[1917] = {0}; - - // Copy branch outputs box data to tmpBox. - for (int i = 0; i < 1917; ++i) { - tmpBox[i].y = branchBoxData[i * 4 + 0]; - tmpBox[i].x = branchBoxData[i * 4 + 1]; - tmpBox[i].h = branchBoxData[i * 4 + 2]; - tmpBox[i].w = branchBoxData[i * 4 + 3]; - } - - // Copy branch outputs score to mScores. - for (int i = 0; i < 1917; ++i) { - for (int j = 0; j < 81; ++j) { - mScores[i][j] = branchScores[i * 81 + j]; + // Copy branch outputs score to mScores. 
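    // Shape note: the detection head emits 1917 candidate boxes, each with 81 class
    // scores (index 0 is the background class, which is why the class loop starts at 1).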
+ for (int i = 0; i < 1917; ++i) { + for (int j = 0; j < 81; ++j) { + mScores[i][j] = branchScores[i * 81 + j]; + } } - } - ssd_boxes_decode(tmpBox, decodedBoxes); - const float nms_threshold = 0.3; - for (int i = 1; i < 81; i++) { - std::vector in_indexes; - for (int j = 0; j < 1917; j++) { - scoreWithOneClass[j] = mScores[j][i]; - if (mScores[j][i] > g_thres_map[i]) { - in_indexes.push_back(j); + ssd_boxes_decode(tmpBox, decodedBoxes); + const float nms_threshold = 0.3; + for (int i = 1; i < 81; i++) { + std::vector in_indexes; + for (int j = 0; j < 1917; j++) { + scoreWithOneClass[j] = mScores[j][i]; + if (mScores[j][i] > g_thres_map[i]) { + in_indexes.push_back(j); + } + } + if (in_indexes.size() == 0) { + continue; + } + + sort(in_indexes.begin(), in_indexes.end(), + [&](int a, int b) { return scoreWithOneClass[a] > scoreWithOneClass[b]; }); + std::vector out_indexes; + + nonMaximumSuppression(decodedBoxes, scoreWithOneClass, in_indexes, out_indexes, + nms_threshold); + for (int k = 0; k < out_indexes.size(); k++) { + outBuff[outBoxNum][0] = out_indexes[k]; //image id + outBuff[outBoxNum][1] = i; //labelid + outBuff[outBoxNum][2] = scoreWithOneClass[out_indexes[k]]; //scores + outBuff[outBoxNum][3] = + decodedBoxes[out_indexes[k]].xmin * inputImageWidth / 300; + outBuff[outBoxNum][4] = + decodedBoxes[out_indexes[k]].ymin * inputImageHeight / 300; + outBuff[outBoxNum][5] = + decodedBoxes[out_indexes[k]].xmax * inputImageWidth / 300; + outBuff[outBoxNum][6] = + decodedBoxes[out_indexes[k]].ymax * inputImageHeight / 300; + outBoxNum++; } } - if (in_indexes.size() == 0) { - continue; + MS_PRINT("outBoxNum %d", outBoxNum); + + for (int i = 0; i < outBoxNum; ++i) { + std::string tmpid_str = std::to_string(outBuff[i][0]); + result += tmpid_str; // image ID + result += "_"; + // tmpid_str = std::to_string(outBuff[i][1]); + MS_PRINT("label_classes i %d, outBuff %d",i, (int) outBuff[i][1]); + tmpid_str = label_classes[(int) outBuff[i][1]]; + result += tmpid_str; // label id + result += "_"; + tmpid_str = std::to_string(outBuff[i][2]); + result += tmpid_str; // scores + result += "_"; + tmpid_str = std::to_string(outBuff[i][3]); + result += tmpid_str; // xmin + result += "_"; + tmpid_str = std::to_string(outBuff[i][4]); + result += tmpid_str; // ymin + result += "_"; + tmpid_str = std::to_string(outBuff[i][5]); + result += tmpid_str; // xmax + result += "_"; + tmpid_str = std::to_string(outBuff[i][6]); + result += tmpid_str; // ymax + result += ";"; } - - sort(in_indexes.begin(), in_indexes.end(), - [&](int a, int b) { return scoreWithOneClass[a] > scoreWithOneClass[b]; }); - std::vector out_indexes; - - nonMaximumSuppression(decodedBoxes, scoreWithOneClass, in_indexes, out_indexes, - nms_threshold); - for (int k = 0; k < out_indexes.size(); k++) { - outBuff[outBoxNum][0] = out_indexes[k]; //image id - outBuff[outBoxNum][1] = i; //labelid - outBuff[outBoxNum][2] = scoreWithOneClass[out_indexes[k]]; //scores - outBuff[outBoxNum][3] = - decodedBoxes[out_indexes[k]].xmin * inputImageWidth / 300; - outBuff[outBoxNum][4] = - decodedBoxes[out_indexes[k]].ymin * inputImageHeight / 300; - outBuff[outBoxNum][5] = - decodedBoxes[out_indexes[k]].xmax * inputImageWidth / 300; - outBuff[outBoxNum][6] = - decodedBoxes[out_indexes[k]].ymax * inputImageHeight / 300; - outBoxNum++; - } - } - MS_PRINT("outBoxNum %d", outBoxNum); - - for (int i = 0; i < outBoxNum; ++i) { - std::string tmpid_str = std::to_string(outBuff[i][0]); - result += tmpid_str; // image ID - result += "_"; - // tmpid_str = 
std::to_string(outBuff[i][1]); - MS_PRINT("label_classes i %d, outBuff %d",i, (int) outBuff[i][1]); - tmpid_str = label_classes[(int) outBuff[i][1]]; - result += tmpid_str; // label id - result += "_"; - tmpid_str = std::to_string(outBuff[i][2]); - result += tmpid_str; // scores - result += "_"; - tmpid_str = std::to_string(outBuff[i][3]); - result += tmpid_str; // xmin - result += "_"; - tmpid_str = std::to_string(outBuff[i][4]); - result += tmpid_str; // ymin - result += "_"; - tmpid_str = std::to_string(outBuff[i][5]); - result += tmpid_str; // xmax - result += "_"; - tmpid_str = std::to_string(outBuff[i][6]); - result += tmpid_str; // ymax - result += ";"; - } - return result; -} -``` + return result; + } + ``` diff --git a/model_zoo/official/lite/object_detection/README.md b/model_zoo/official/lite/object_detection/README.md index da3751f625..aeed405dff 100644 --- a/model_zoo/official/lite/object_detection/README.md +++ b/model_zoo/official/lite/object_detection/README.md @@ -27,7 +27,7 @@ 2. 连接Android设备,运行目标检测示例应用程序。 通过USB连接Android设备调试,点击`Run 'app'`即可在你的设备上运行本示例项目。 - * 注:编译过程中Android Studio会自动下载MindSpore Lite、OpenCV、模型文件等相关依赖项,编译过程需做耐心等待。 + * 注:编译过程中Android Studio会自动下载MindSpore Lite、模型文件等相关依赖项,编译过程需做耐心等待。 ![run_app](images/run_app.PNG) @@ -85,9 +85,9 @@ app ### 配置MindSpore Lite依赖项 -Android JNI层调用MindSpore C++ API时,需要相关库文件支持。可通过MindSpore Lite[源码编译](https://www.mindspore.cn/lite/docs/zh-CN/master/deploy.html)生成`libmindspore-lite.so`库文件。 +Android JNI层调用MindSpore C++ API时,需要相关库文件支持。可通过MindSpore Lite[源码编译](https://www.mindspore.cn/lite/tutorial/zh-CN/master/build.html)生成"mindspore-lite-X.X.X-mindata-armXX-cpu"库文件包(包含`libmindspore-lite.so`库文件和相关头文件,可包含多个兼容架构)。 -在Android Studio中将编译完成的mindspore-lite-X.X.X-mindata-armXX-cpu压缩包(包含`libmindspore-lite.so`库文件和相关头文件,可包含多个兼容架构),解压之后放置在APP工程的`app/src/main/cpp`目录下,并在app的`build.gradle`文件中配置CMake编译支持,以及`arm64-v8a`和`armeabi-v7a`的编译支持,如下所示: +在Android Studio中将编译完成的mindspore-lite-X.X.X-mindata-armXX-cpu压缩包,解压之后放置在APP工程的`app/src/main/cpp`目录下,并在app的`build.gradle`文件中配置CMake编译支持,以及`arm64-v8a`和`armeabi-v7a`的编译支持,如下所示: ``` android{ defaultConfig{ @@ -130,7 +130,7 @@ target_link_libraries( * 注:若自动下载失败,请手动下载相关库文件并将其放在对应位置: -* libmindspore-lite.so [下载链接](https://download.mindspore.cn/model_zoo/official/lite/lib/mindspore%20version%200.7/libmindspore-lite.so) +* mindspore-lite-1.0.0-minddata-arm64-cpu.tar.gz [下载链接](https://download.mindspore.cn/model_zoo/official/lite/lib/mindspore%20version%201.0/mindspore-lite-1.0.0-minddata-arm64-cpu.tar.gz) ### 下载及部署模型文件 @@ -235,7 +235,7 @@ target_link_libraries( 3. 进行模型推理前,输入tensor格式为 NHWC,shape为1:300:300:3,格式为RGB, 并对输入tensor做标准化处理. 
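   其中“标准化”一般指按通道减去均值并除以标准差(示意:x' = (x − mean_c) / std_c,mean_c、std_c 为各通道的均值与标准差,具体取值以示例代码中的参数为准),处理流程可参考下面的代码。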
```cpp -bool PreProcessImageData(LiteMat &lite_mat_bgr,LiteMat &lite_norm_mat_cut) { + bool PreProcessImageData(LiteMat &lite_mat_bgr,LiteMat &lite_norm_mat_cut) { bool ret=false; LiteMat lite_mat_resize; ret = ResizeBilinear(lite_mat_bgr, lite_mat_resize, 300, 300); @@ -273,12 +273,12 @@ bool PreProcessImageData(LiteMat &lite_mat_bgr,LiteMat &lite_norm_mat_cut) { std::vector> Msout; std::unordered_map msOutputs; - for (const auto &name : names) { + for (const auto &name : names) { auto temp_dat =mSession->GetOutputByTensorName(name); msOutputs.insert(std::pair {name, temp_dat}); } std::string retStr = ProcessRunnetResult(msOutputs, ret); - ``` + ``` - 模型有2个输出,输出1是目标的类别置信度,维度为1:1917: 81; 输出2是目标的矩形框坐标偏移量,维度为1:1917:4。 为了得出目标的实际矩形框,需要根据偏移量计算出矩形框的位置。这部分在 getDefaultBoxes中实现。 diff --git a/model_zoo/official/lite/object_detection/app/src/main/cpp/ssd_util/ssd_util.cpp b/model_zoo/official/lite/object_detection/app/src/main/cpp/ssd_util/ssd_util.cpp index 1a7fdb237d..f222bcb70f 100644 --- a/model_zoo/official/lite/object_detection/app/src/main/cpp/ssd_util/ssd_util.cpp +++ b/model_zoo/official/lite/object_detection/app/src/main/cpp/ssd_util/ssd_util.cpp @@ -20,6 +20,7 @@ #define MS_PRINT(format, ...) __android_log_print(ANDROID_LOG_INFO, "MSJNI", format, ##__VA_ARGS__) +SSDModelUtil::~SSDModelUtil(void) {} /** * SSD model util constructor. @@ -61,8 +62,7 @@ std::string SSDModelUtil::getDecodeResult(float *branchScores, float *branchBoxD } // NMS processing. - ssd_boxes_decode(tmpBox, decodedBoxes); - // const float nms_threshold = 0.6; + ssd_boxes_decode(tmpBox, decodedBoxes, 0.1, 0.2, 1917); const float nms_threshold = 0.3; for (int i = 1; i < 81; i++) { std::vector in_indexes; diff --git a/model_zoo/official/lite/object_detection/app/src/main/cpp/ssd_util/ssd_util.h b/model_zoo/official/lite/object_detection/app/src/main/cpp/ssd_util/ssd_util.h index 8016aef893..7e0ddfc18f 100644 --- a/model_zoo/official/lite/object_detection/app/src/main/cpp/ssd_util/ssd_util.h +++ b/model_zoo/official/lite/object_detection/app/src/main/cpp/ssd_util/ssd_util.h @@ -26,6 +26,8 @@ class SSDModelUtil { // Constructor. SSDModelUtil(int srcImageWidth, int srcImgHeight); + ~SSDModelUtil(); + /** * Return the SSD model post-processing result. * @param branchScores @@ -34,10 +36,6 @@ class SSDModelUtil { */ std::string getDecodeResult(float *branchScores, float *branchBoxData); - // ============= variables =============. - int inputImageHeight; - int inputImageWidth; - struct NormalBox { float y; float x; @@ -64,7 +62,8 @@ class SSDModelUtil { private: std::vector mDefaultBoxes; - + int inputImageHeight; + int inputImageWidth; void getDefaultBoxes(); @@ -80,7 +79,6 @@ class SSDModelUtil { double IOU(float r1[4], float r2[4]); - // ============= variables =============. struct network { int model_input_height = 300;
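
Below is a minimal usage sketch of the `SSDModelUtil` post-processing class declared above; the wrapper function, buffer names, and include path are illustrative assumptions, and the buffers are expected to hold the two model outputs (1 x 1917 x 81 class scores and 1 x 1917 x 4 box offsets) described in the README:

```cpp
#include <string>

#include "ssd_util.h"  // adjust the include path to the project layout

// Hypothetical helper showing how the utility is typically driven from the JNI layer.
std::string DecodeDetections(int srcImageWidth, int srcImageHeight,
                             float *scoresBuf, float *boxesBuf) {
    // The constructor takes the original camera-frame size so that the decoded
    // boxes can be scaled back from the 300 x 300 model input.
    SSDModelUtil ssdUtil(srcImageWidth, srcImageHeight);
    // Each detection in the returned string is encoded as
    // "id_label_score_xmin_ymin_xmax_ymax;" and is split on ';' and '_' by the caller.
    return ssdUtil.getDecodeResult(scoresBuf, boxesBuf);
}
```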