Fix security error and change download link

gongdaguo 2020-09-18 17:12:13 +08:00
parent 656f9df5be
commit 6512f678d5
7 changed files with 228 additions and 226 deletions

View File

@@ -25,6 +25,8 @@ graph_8bit_1021_combine.tflite
lite-model_aiy_vision_classifier_insects_V1_3.tflite
lite-model_aiy_vision_classifier_plants_V1_3.tflite
lite-model_object_detection_mobile_object_labeler_v1_1.tflite
lite-model_cropnet_classifier_cassava_disease_V1_1.tflite
vision_classifier_fungi_mobile_V1_1_default_1.tflite
detect.tflite
ssd_mobilenet_v1_1_default_1.tflite
object_detection_mobile_object_localizer_v1_1_default_1.tflite

View File

@@ -126,11 +126,11 @@ target_link_libraries(
)
```
* In this example, the build process uses the download.gradle file to automatically download the MindSpore Lite library files and place them in the `app/src/main/cpp/` directory.
Note: if the automatic download fails, please manually download the relevant library file and put it in the corresponding location:
mindspore-lite-1.0.0-minddata-arm64-cpu.tar.gz [Download link](https://download.mindspore.cn/model_zoo/official/lite/lib/mindspore%20version%201.0/mindspore-lite-1.0.0-minddata-arm64-cpu.tar.gz)
### Downloading and Deploying a Model File
@@ -189,32 +189,32 @@ The inference code process is as follows. For details about the complete code, s
Convert the image data to be detected into the Tensor format of the MindSpore model.
```cpp
// Convert the Bitmap image passed in from the Java layer to a Mat for OpenCV processing.
BitmapToMat(env, srcBitmap, matImageSrc);
// Preprocess the image, for example by resizing it.
matImgPreprocessed = PreProcessImageData(matImageSrc);

ImgDims inputDims;
inputDims.channel = matImgPreprocessed.channels();
inputDims.width = matImgPreprocessed.cols;
inputDims.height = matImgPreprocessed.rows;
float *dataHWC = new float[inputDims.channel * inputDims.width * inputDims.height];

// Copy the image data to be detected into the dataHWC array.
// The dataHWC[image_size] array is the intermediate variable for the input MindSpore model tensor.
float *ptrTmp = reinterpret_cast<float *>(matImgPreprocessed.data);
for (int i = 0; i < inputDims.channel * inputDims.width * inputDims.height; i++) {
    dataHWC[i] = ptrTmp[i];
}

// Copy dataHWC[image_size] into the input tensor.
auto msInputs = mSession->GetInputs();
auto inTensor = msInputs.front();
memcpy(inTensor->MutableData(), dataHWC,
       inputDims.channel * inputDims.width * inputDims.height * sizeof(float));
delete[] (dataHWC);
```
3. Perform inference on the input tensor based on the model, obtain the output tensor, and perform post-processing.
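A minimal sketch of this step, assuming the session handle and the output-lookup conventions used elsewhere in these demos (the names here are illustrative, not the demo's exact code):
```cpp
// Sketch only: run the graph and collect the output tensors for post-processing.
auto status = mSession->RunGraph();
if (status != mindspore::lite::RET_OK) {
    MS_PRINT("MindSpore run net error.");
    return NULL;
}
// Look up each output tensor by name.
auto names = mSession->GetOutputTensorNames();
std::unordered_map<std::string, mindspore::tensor::MSTensor *> msOutputs;
for (const auto &name : names) {
    auto temp_dat = mSession->GetOutputByTensorName(name);
    msOutputs.insert(std::pair<std::string, mindspore::tensor::MSTensor *>{name, temp_dat});
}
```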

View File

@@ -86,13 +86,13 @@ app
### Configuring MindSpore Lite Dependencies
When the Android JNI layer calls the MindSpore C++ APIs, it needs the relevant library files. You can generate the `mindspore-lite-X.X.X-minddata-armXX-cpu` library file package (containing the `libmindspore-lite.so` library file and related header files; it may include multiple compatible architectures) by [building MindSpore Lite from source](https://www.mindspore.cn/lite/tutorial/zh-CN/master/build.html).
In this example, the build process uses the download.gradle file to automatically download the MindSpore Lite library files from the Huawei server and place them in the `app/src/main/cpp/` directory.
* Note: if the automatic download fails, please manually download the relevant library file and put it in the corresponding location:
mindspore-lite-1.0.0-minddata-arm64-cpu.tar.gz [Download link](https://download.mindspore.cn/model_zoo/official/lite/lib/mindspore%20version%201.0/mindspore-lite-1.0.0-minddata-arm64-cpu.tar.gz)
@@ -243,20 +243,22 @@ target_link_libraries(
- Post-processing of the output data.
```cpp
std::string ProcessRunnetResult(const int RET_CATEGORY_SUM, const char *const labels_name_map[],
                                std::unordered_map<std::string, mindspore::tensor::MSTensor *> msOutputs) {
    // Get the branch of the model output.
    // Use iterators to get map elements.
    std::unordered_map<std::string, mindspore::tensor::MSTensor *>::iterator iter;
    iter = msOutputs.begin();

    // The mobilenetv2.ms model outputs just one branch.
    auto outputTensor = iter->second;
    int tensorNum = outputTensor->ElementsNum();
    MS_PRINT("Number of tensor elements:%d", tensorNum);

    // Get a pointer to the first score.
    float *temp_scores = static_cast<float *>(outputTensor->MutableData());

    float scores[RET_CATEGORY_SUM];
    for (int i = 0; i < RET_CATEGORY_SUM; ++i) {
        if (temp_scores[i] > 0.5) {
@@ -264,7 +266,7 @@ target_link_libraries(
        }
        scores[i] = temp_scores[i];
    }

    // Score for each category.
    // Converted to text information that needs to be displayed in the APP.
    std::string categoryScore = "";
@@ -276,5 +278,5 @@ target_link_libraries(
        categoryScore += ";";
    }
    return categoryScore;
}
```
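With the reworked signature, the caller passes in the category count and label table. A hypothetical call site might look as follows; the constant value and label array here are illustrative assumptions, not the demo's exact definitions:
```cpp
// Hypothetical values for illustration only.
constexpr int RET_CATEGORY_SUM = 500;                         // assumed number of classes
extern const char *const labels_name_map[RET_CATEGORY_SUM];   // assumed label table

std::string resultStr = ProcessRunnetResult(RET_CATEGORY_SUM, labels_name_map, msOutputs);
```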

View File

@@ -88,7 +88,7 @@ In this example, the download.gradle File configuration auto download library f
Note: if the automatic download fails, please manually download the relevant library file and put it in the corresponding location:
mindspore-lite-1.0.0-minddata-arm64-cpu.tar.gz [Download link](https://download.mindspore.cn/model_zoo/official/lite/lib/mindspore%20version%201.0/mindspore-lite-1.0.0-minddata-arm64-cpu.tar.gz)
@@ -365,188 +365,188 @@ The inference code process is as follows. For details about the complete code, s
- For targets whose probability is greater than the threshold, the rectangular boxes are filtered by the NMS algorithm, and the output boxes are then restored to the original image size.
```cpp
std::string SSDModelUtil::getDecodeResult(float *branchScores, float *branchBoxData) {
    std::string result = "";
    NormalBox tmpBox[1917] = {0};
    float mScores[1917][81] = {0};
    float outBuff[1917][7] = {0};
    float scoreWithOneClass[1917] = {0};
    int outBoxNum = 0;
    YXBoxes decodedBoxes[1917] = {0};

    // Copy the branch output box data to tmpBox.
    for (int i = 0; i < 1917; ++i) {
        tmpBox[i].y = branchBoxData[i * 4 + 0];
        tmpBox[i].x = branchBoxData[i * 4 + 1];
        tmpBox[i].h = branchBoxData[i * 4 + 2];
        tmpBox[i].w = branchBoxData[i * 4 + 3];
    }

    // Copy the branch output scores to mScores.
    for (int i = 0; i < 1917; ++i) {
        for (int j = 0; j < 81; ++j) {
            mScores[i][j] = branchScores[i * 81 + j];
        }
    }

    // Decode the boxes, then run NMS per class.
    ssd_boxes_decode(tmpBox, decodedBoxes);
    const float nms_threshold = 0.3;
    for (int i = 1; i < 81; i++) {
        std::vector<int> in_indexes;
        for (int j = 0; j < 1917; j++) {
            scoreWithOneClass[j] = mScores[j][i];
            if (mScores[j][i] > g_thres_map[i]) {
                in_indexes.push_back(j);
            }
        }
        if (in_indexes.size() == 0) {
            continue;
        }
        sort(in_indexes.begin(), in_indexes.end(),
             [&](int a, int b) { return scoreWithOneClass[a] > scoreWithOneClass[b]; });
        std::vector<int> out_indexes;
        nonMaximumSuppression(decodedBoxes, scoreWithOneClass, in_indexes, out_indexes,
                              nms_threshold);
        // Restore the kept boxes to the original image size.
        for (int k = 0; k < out_indexes.size(); k++) {
            outBuff[outBoxNum][0] = out_indexes[k];                     // image id
            outBuff[outBoxNum][1] = i;                                  // label id
            outBuff[outBoxNum][2] = scoreWithOneClass[out_indexes[k]];  // score
            outBuff[outBoxNum][3] = decodedBoxes[out_indexes[k]].xmin * inputImageWidth / 300;
            outBuff[outBoxNum][4] = decodedBoxes[out_indexes[k]].ymin * inputImageHeight / 300;
            outBuff[outBoxNum][5] = decodedBoxes[out_indexes[k]].xmax * inputImageWidth / 300;
            outBuff[outBoxNum][6] = decodedBoxes[out_indexes[k]].ymax * inputImageHeight / 300;
            outBoxNum++;
        }
    }
    MS_PRINT("outBoxNum %d", outBoxNum);

    // Serialize each detection as "id_label_score_xmin_ymin_xmax_ymax;".
    for (int i = 0; i < outBoxNum; ++i) {
        std::string tmpid_str = std::to_string(outBuff[i][0]);
        result += tmpid_str;  // image ID
        result += "_";
        MS_PRINT("label_classes i %d, outBuff %d", i, (int) outBuff[i][1]);
        tmpid_str = label_classes[(int) outBuff[i][1]];
        result += tmpid_str;  // label id
        result += "_";
        tmpid_str = std::to_string(outBuff[i][2]);
        result += tmpid_str;  // score
        result += "_";
        tmpid_str = std::to_string(outBuff[i][3]);
        result += tmpid_str;  // xmin
        result += "_";
        tmpid_str = std::to_string(outBuff[i][4]);
        result += tmpid_str;  // ymin
        result += "_";
        tmpid_str = std::to_string(outBuff[i][5]);
        result += tmpid_str;  // xmax
        result += "_";
        tmpid_str = std::to_string(outBuff[i][6]);
        result += tmpid_str;  // ymax
        result += ";";
    }
    return result;
}
```

View File

@@ -27,7 +27,7 @@
2. Connect an Android device and run the object detection sample application.
Connect the Android device over USB for debugging and click `Run 'app'` to run the sample project on your device.
* Note: during the build, Android Studio automatically downloads MindSpore Lite, the model files, and other dependencies; please be patient while the build completes.
![run_app](images/run_app.PNG)
@@ -85,9 +85,9 @@ app
### Configuring MindSpore Lite Dependencies
When the Android JNI layer calls the MindSpore C++ APIs, it needs the relevant library files. You can generate the `mindspore-lite-X.X.X-minddata-armXX-cpu` library file package (containing the `libmindspore-lite.so` library file and related header files; it may include multiple compatible architectures) by [building MindSpore Lite from source](https://www.mindspore.cn/lite/tutorial/zh-CN/master/build.html).
In Android Studio, decompress the compiled mindspore-lite-X.X.X-minddata-armXX-cpu package and place it in the `app/src/main/cpp` directory of the APP project. Then configure CMake compilation support and `arm64-v8a`/`armeabi-v7a` build support in the app's `build.gradle` file, as shown below:
```
android{
defaultConfig{
@@ -130,7 +130,7 @@ target_link_libraries(
* Note: if the automatic download fails, please manually download the relevant library file and put it in the corresponding location:
* mindspore-lite-1.0.0-minddata-arm64-cpu.tar.gz [Download link](https://download.mindspore.cn/model_zoo/official/lite/lib/mindspore%20version%201.0/mindspore-lite-1.0.0-minddata-arm64-cpu.tar.gz)
### Downloading and Deploying the Model File
@@ -235,7 +235,7 @@ target_link_libraries(
3. Before model inference, the input tensor format is NHWC with shape 1:300:300:3 in RGB format, and the input tensor is normalized.
```cpp
bool PreProcessImageData(LiteMat &lite_mat_bgr, LiteMat &lite_norm_mat_cut) {
    bool ret = false;
    LiteMat lite_mat_resize;
    ret = ResizeBilinear(lite_mat_bgr, lite_mat_resize, 300, 300);
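    // Sketch (assumed lite_cv usage, not necessarily the demo's exact code): after
    // resizing, the image would typically be converted to float and normalized
    // before being copied into the input tensor, e.g.:
    //   LiteMat lite_mat_convert_float;
    //   ConvertTo(lite_mat_resize, lite_mat_convert_float, 1.0 / 255.0);
    //   SubStractMeanNormalize(lite_mat_convert_float, lite_norm_mat_cut, means, stds);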
@@ -273,12 +273,12 @@ bool PreProcessImageData(LiteMat &lite_mat_bgr,LiteMat &lite_norm_mat_cut) {
            std::vector<mindspore::tensor::MSTensor *>> Msout;
    std::unordered_map<std::string, mindspore::tensor::MSTensor *> msOutputs;
    for (const auto &name : names) {
        auto temp_dat = mSession->GetOutputByTensorName(name);
        msOutputs.insert(std::pair<std::string, mindspore::tensor::MSTensor *>{name, temp_dat});
    }
    std::string retStr = ProcessRunnetResult(msOutputs, ret);
```
- The model has two outputs. Output 1 is the class confidence of the targets, with dimensions 1:1917:81; output 2 is the coordinate offsets of the targets' rectangular boxes, with dimensions 1:1917:4. To obtain the actual rectangular boxes, the box positions must be computed from the offsets and the default boxes; this part is implemented in getDefaultBoxes, and a sketch of the idea follows.
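For intuition, decoding one predicted offset against its default (anchor) box could look like the sketch below; the 0.1/0.2 scale constants match the values passed to `ssd_boxes_decode` in this commit, but the function and field names are illustrative assumptions, not the demo's exact code:
```cpp
#include <algorithm>
#include <cmath>

// Illustrative sketch of SSD box decoding; names and clipping are assumptions.
void DecodeOneBox(const NormalBox &pred, const NormalBox &anchor, YXBoxes &out) {
    const float scale_xy = 0.1f;  // scale for the center offsets (assumed)
    const float scale_wh = 0.2f;  // scale for the size offsets (assumed)
    float yc = pred.y * scale_xy * anchor.h + anchor.y;  // decoded center y
    float xc = pred.x * scale_xy * anchor.w + anchor.x;  // decoded center x
    float h = std::exp(pred.h * scale_wh) * anchor.h;    // decoded height
    float w = std::exp(pred.w * scale_wh) * anchor.w;    // decoded width
    // Clip to [0, 1] and map to the 300 x 300 network input coordinates.
    out.ymin = std::min(std::max(yc - h / 2, 0.0f), 1.0f) * 300;
    out.xmin = std::min(std::max(xc - w / 2, 0.0f), 1.0f) * 300;
    out.ymax = std::min(std::max(yc + h / 2, 0.0f), 1.0f) * 300;
    out.xmax = std::min(std::max(xc + w / 2, 0.0f), 1.0f) * 300;
}
```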

View File

@@ -20,6 +20,7 @@
#define MS_PRINT(format, ...) __android_log_print(ANDROID_LOG_INFO, "MSJNI", format, ##__VA_ARGS__)
SSDModelUtil::~SSDModelUtil(void) {}
/**
* SSD model util constructor.
@@ -61,8 +62,7 @@ std::string SSDModelUtil::getDecodeResult(float *branchScores, float *branchBoxD
}
// NMS processing.
ssd_boxes_decode(tmpBox, decodedBoxes, 0.1, 0.2, 1917);
const float nms_threshold = 0.3;
for (int i = 1; i < 81; i++) {
std::vector<int> in_indexes;
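// Note: the updated ssd_boxes_decode call above passes the decode scales and the
// box count explicitly; a declaration consistent with that call might be
// (parameter names here are illustrative assumptions):
//   void ssd_boxes_decode(const NormalBox *boxes, YXBoxes *decoded_boxes,
//                         float scale0 = 0.1f, float scale1 = 0.2f, int num = 1917);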

View File

@@ -26,6 +26,8 @@ class SSDModelUtil {
// Constructor.
SSDModelUtil(int srcImageWidth, int srcImgHeight);
~SSDModelUtil();
/**
* Return the SSD model post-processing result.
* @param branchScores
@@ -34,10 +36,6 @@
*/
std::string getDecodeResult(float *branchScores, float *branchBoxData);
struct NormalBox {
float y;
float x;
@@ -64,7 +62,8 @@
private:
std::vector<struct NormalBox> mDefaultBoxes;
int inputImageHeight;
int inputImageWidth;
void getDefaultBoxes();
@@ -80,7 +79,6 @@
double IOU(float r1[4], float r2[4]);
struct network {
int model_input_height = 300;