From ca6d289e5cc3560419f84e2771662a173bf48341 Mon Sep 17 00:00:00 2001 From: chenweitao_295 Date: Sat, 29 May 2021 17:38:52 +0800 Subject: [PATCH] tinydarknet add 310 infer --- model_zoo/official/cv/tinydarknet/README.md | 64 ++++++- .../official/cv/tinydarknet/README_CN.md | 61 ++++++- .../ascend310_infer/CMakeLists.txt | 14 ++ .../cv/tinydarknet/ascend310_infer/build.sh | 23 +++ .../tinydarknet/ascend310_infer/inc/utils.h | 32 ++++ .../tinydarknet/ascend310_infer/src/main.cc | 159 ++++++++++++++++++ .../tinydarknet/ascend310_infer/src/utils.cc | 130 ++++++++++++++ model_zoo/official/cv/tinydarknet/export.py | 10 +- .../official/cv/tinydarknet/postprocess.py | 72 ++++++++ .../cv/tinydarknet/scripts/run_infer_310.sh | 108 ++++++++++++ 10 files changed, 662 insertions(+), 11 deletions(-) create mode 100644 model_zoo/official/cv/tinydarknet/ascend310_infer/CMakeLists.txt create mode 100644 model_zoo/official/cv/tinydarknet/ascend310_infer/build.sh create mode 100644 model_zoo/official/cv/tinydarknet/ascend310_infer/inc/utils.h create mode 100644 model_zoo/official/cv/tinydarknet/ascend310_infer/src/main.cc create mode 100644 model_zoo/official/cv/tinydarknet/ascend310_infer/src/utils.cc create mode 100644 model_zoo/official/cv/tinydarknet/postprocess.py create mode 100644 model_zoo/official/cv/tinydarknet/scripts/run_infer_310.sh diff --git a/model_zoo/official/cv/tinydarknet/README.md b/model_zoo/official/cv/tinydarknet/README.md index d17a0dd59d5..46568b20dfc 100644 --- a/model_zoo/official/cv/tinydarknet/README.md +++ b/model_zoo/official/cv/tinydarknet/README.md @@ -14,9 +14,14 @@ - [Distributed Training](#distributed-training) - [Evaluation Procsee](#evaluation-process) - [Evaluation](#evaluation) + - [Inference Process](#inference-process) + - [Export MindIR](#export-mindir) + - [Infer on Ascend310](#infer-on-ascend310) + - [result](#result) - [Model Description](#model-description) - [Performance](#performance) - [Training Performance](#training-performance) + - [Evaluation Performance](#evaluation-performance) - [Inference Performance](#inference-performance) - [ModelZoo Homepage](#modelzoo-homepage) @@ -97,12 +102,14 @@ For more details, please refer the specify script. ```bash ├── tinydarknet - ├── README.md // descriptions about Tiny-Darknet in English - ├── README_CN.md // descriptions about Tiny-Darknet in Chinese + ├── README.md // descriptions about Tiny-Darknet in English + ├── README_CN.md // descriptions about Tiny-Darknet in Chinese + ├── ascend310_infer // application for 310 inference ├── scripts ├──run_standalone_train.sh // shell script for single on Ascend ├──run_distribute_train.sh // shell script for distributed on Ascend ├──run_eval.sh // shell script for evaluation on Ascend + ├──run_infer_310.sh // shell script for inference on Ascend310 ├── src ├─lr_scheduler //learning rate scheduler ├─__init__.py // init @@ -116,7 +123,8 @@ For more details, please refer the specify script. ├── train.py // training script ├── eval.py // evaluation script ├── export.py // export checkpoint file into air/onnx - ├── mindspore_hub_conf.py // hub config + ├── mindspore_hub_conf.py // hub config + ├── postprocess.py // postprocess script ``` @@ -235,6 +243,40 @@ For more configuration details, please refer the script config.py. 
 accuracy: {'top_1_accuracy': 0.5871979166666667, 'top_5_accuracy': 0.8175280448717949}
 ```
 
+## [Inference process](#contents)
+
+### Export MindIR
+
+```shell
+# Export the model
+python export.py --dataset_name [DATASET_NAME] --file_name [FILE_NAME] --file_format [EXPORT_FORMAT]
+```
+
+The script does not take a ckpt_file option; place the checkpoint file at the path given by the `checkpoint_path` parameter in `config.py`.
+`EXPORT_FORMAT` should be in ["AIR", "MINDIR"].
+
+### Infer on Ascend310
+
+Before performing inference, the MINDIR file must be exported with the `export.py` script. We only provide an example of inference using the MINDIR model.
+Currently, batch_size can only be set to 1.
+
+```shell
+# Ascend310 inference
+bash run_infer_310.sh [MINDIR_PATH] [DATA_PATH] [LABEL_PATH] [DVPP] [DEVICE_ID]
+```
+
+- `LABEL_PATH` is the path of label.txt. Write a small Python script that sorts the class directories of the dataset, maps each image file name to the index of its sorted class (i.e. [file name : class index]), and writes the mapping to label.txt (a minimal example script is sketched after this file's diff).
+- `DVPP` is mandatory and must be chosen from ["DVPP", "CPU"]; it is case-insensitive. Tiny-DarkNet runs inference on [224, 224] images, and the DVPP hardware requires the width to be divisible by 16 and the height by 2; note that the current script only supports the CPU mode.
+- `DEVICE_ID` is optional, default value is 0.
+
+### result
+
+The inference result is saved in the current path; you can find results like the following in the acc.log file.
+
+```bash
+'top_1_accuracy': 59.07%, 'top_5_accuracy': 81.73%
+```
+
 # [Model Description](#contents)
 
 ## [Performance](#contents)
@@ -256,7 +298,7 @@ For more configuration details, please refer the script config.py.
 | Parameters(M)               | 4.0M                        |
 | Scripts                     | [Tiny-Darknet Scripts](https://gitee.com/mindspore/mindspore/tree/master/model_zoo/official/cv/tinydarknet) |
 
-### [Inference Performance](#contents)
+### [Evaluation Performance](#contents)
 
 | Parameters          | Ascend                      |
 | ------------------- | --------------------------- |
@@ -270,6 +312,20 @@ For more configuration details, please refer the script config.py.
 | Accuracy            | 8 pc Top-1: 58.7%; Top-5: 81.7% |
 | Model for inference | 11.6M (.ckpt file)          |
 
+### [Inference Performance](#contents)
+
+| Parameters          | Ascend                       |
+| ------------------- | ---------------------------- |
+| Model Version       | TinyDarknet                  |
+| Resource            | Ascend 310; Euler2.8         |
+| Uploaded Date       | 05/29/2021 (month/day/year)  |
+| MindSpore Version   | 1.2.0                        |
+| Dataset             | ImageNet                     |
+| batch_size          | 1                            |
+| outputs             | Accuracy                     |
+| Accuracy            | Top-1: 59.07%; Top-5: 81.73% |
+| Model for inference | 10.3M (.ckpt file)           |
+
 # [ModelZoo Homepage](#contents)
 
 Please check the official[homepage](https://gitee.com/mindspore/mindspore/tree/master/model_zoo). 
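The `LABEL_PATH` bullet above asks the user to write a helper script that is not shipped with this patch. The following is only a minimal sketch of such a script; the name `gen_label.py` is illustrative, and it assumes the validation images are grouped into one sub-directory per class (the usual ImageNet layout) and that the output must use the `image_id:label` lines parsed by `read_label()` in `postprocess.py`.

```python
# gen_label.py -- illustrative helper, not shipped with this patch.
# Assumes <data_path> contains one sub-directory per class (ImageNet-style
# validation layout). Classes are sorted by name and numbered from 0, and each
# image file name (without extension) is mapped to its class index, matching
# the "img_id:label" split performed by read_label() in postprocess.py.
import os
import sys


def gen_label(data_path, out_file="label.txt"):
    classes = sorted(d for d in os.listdir(data_path)
                     if os.path.isdir(os.path.join(data_path, d)))
    with open(out_file, "w") as f:
        for class_index, class_name in enumerate(classes):
            class_dir = os.path.join(data_path, class_name)
            for image_name in sorted(os.listdir(class_dir)):
                image_id = os.path.splitext(image_name)[0]
                f.write("{}:{}\n".format(image_id, class_index))


if __name__ == "__main__":
    gen_label(sys.argv[1])
```

Run as `python gen_label.py /path/to/val_dataset` and pass the generated label.txt as `LABEL_PATH` to run_infer_310.sh.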
diff --git a/model_zoo/official/cv/tinydarknet/README_CN.md b/model_zoo/official/cv/tinydarknet/README_CN.md index 8849345d6c3..d5c67b0618b 100644 --- a/model_zoo/official/cv/tinydarknet/README_CN.md +++ b/model_zoo/official/cv/tinydarknet/README_CN.md @@ -14,10 +14,15 @@ - [分布式训练](#分布式训练) - [评估过程](#评估过程) - [评估](#评估) + - [推理过程](#推理过程) + - [导出MindIR](#导出mindir) + - [在Ascend310执行推理](#在ascend310执行推理) + - [结果](#结果) - [模型描述](#模型描述) - [性能](#性能) - [训练性能](#训练性能) - [评估性能](#评估性能) + - [推理性能](#推理性能) - [ModelZoo主页](#modelzoo主页) # [Tiny-DarkNet描述](#目录) @@ -105,12 +110,14 @@ Tiny-DarkNet是Joseph Chet Redmon等人提出的一个16层的针对于经典的 ```bash ├── tinydarknet - ├── README.md // Tiny-Darknet英文说明 + ├── README.md // Tiny-Darknet英文说明 ├── README_CN.md // Tiny-Darknet中文说明 + ├── ascend310_infer // 用于310推理 ├── scripts - ├──run_standalone_train.sh // Ascend单卡训练shell脚本 - ├──run_distribute_train.sh // Ascend分布式训练shell脚本 + ├──run_standalone_train.sh // Ascend单卡训练shell脚本 + ├──run_distribute_train.sh // Ascend分布式训练shell脚本 ├──run_eval.sh // Ascend评估shell脚本 + ├──run_infer_310.sh // Ascend310推理shell脚本 ├── src ├─lr_scheduler //学习率策略 ├─__init__.py // 初始化文件 @@ -125,6 +132,7 @@ Tiny-DarkNet是Joseph Chet Redmon等人提出的一个16层的针对于经典的 ├── eval.py // 评估脚本 ├── export.py // 导出checkpoint文件 ├── mindspore_hub_conf.py // hub配置文件 + ├── postprocess.py // 310推理后处理脚本 ``` @@ -243,6 +251,39 @@ Tiny-DarkNet是Joseph Chet Redmon等人提出的一个16层的针对于经典的 accuracy: {'top_1_accuracy': 0.5871979166666667, 'top_5_accuracy': 0.8175280448717949} ``` +## 推理过程 + +### 导出MindIR + +```shell +python export.py --dataset [DATASET] --file_name [FILE_NAME] --file_format [EXPORT_FORMAT] +``` + +参数没有ckpt_file选项,ckpt文件请按照`config.py`中参数`checkpoint_path`的路径存放。 +`EXPORT_FORMAT` 可选 ["AIR", "MINDIR"]. + +### 在Ascend310执行推理 + +在执行推理前,mindir文件必须通过`export.py`脚本导出。以下展示了使用mindir模型执行推理的示例。 +目前仅支持batch_size为1的推理。 + +```shell +# Ascend310 inference +bash run_infer_310.sh [MINDIR_PATH] [DATA_PATH] [LABEL_PATH] [DVPP] [DEVICE_ID] +``` + +- `LABEL_PATH` label.txt存放的路径,写一个py脚本对数据集下的类别名进行排序,对类别下的文件名和类别排序值做映射,例如[文件名:排序值],将映射结果写到labe.txt文件中。 +- `DVPP` 为必填项,需要在["DVPP", "CPU"]选择,大小写均可。注意目前仅支持CPU模式。 +- `DEVICE_ID` 可选,默认值为0。 + +### 结果 + +推理结果保存在脚本执行的当前路径,你可以在acc.log中看到以下精度计算结果。 + +```bash +'top_1_accuracy': 59.07%, 'top_5_accuracy': 81.73% +``` + # [模型描述](#目录) ## [性能](#目录) @@ -278,6 +319,20 @@ Tiny-DarkNet是Joseph Chet Redmon等人提出的一个16层的针对于经典的 | 准确率 | 8卡 Top-1: 58.7%; Top-5: 81.7% | | 推理模型 | 11.6M (.ckpt文件) | +### [推理性能](#目录) + +| 参数 | Ascend | +| -------------- | ---------------------------| +| 模型版本 | TinyDarknet | +| 资源 | Ascend 310;系统 Euler2.8 | +| 上传日期 | 2021-05-29 | +| MindSpore版本 | 1.2.0 | +| 数据集 | ImageNet | +| batch_size | 1 | +| 输出 | Accuracy | +| 准确率 | Top-1: 59.07% Top-5: 81.73% | +| 推理模型 | 10.3M(.ckpt文件) | + # [ModelZoo主页](#目录) 请参考官方[主页](https://gitee.com/mindspore/mindspore/tree/master/model_zoo). 
diff --git a/model_zoo/official/cv/tinydarknet/ascend310_infer/CMakeLists.txt b/model_zoo/official/cv/tinydarknet/ascend310_infer/CMakeLists.txt new file mode 100644 index 00000000000..ee3c8544734 --- /dev/null +++ b/model_zoo/official/cv/tinydarknet/ascend310_infer/CMakeLists.txt @@ -0,0 +1,14 @@ +cmake_minimum_required(VERSION 3.14.1) +project(Ascend310Infer) +add_compile_definitions(_GLIBCXX_USE_CXX11_ABI=0) +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O0 -g -std=c++17 -Werror -Wall -fPIE -Wl,--allow-shlib-undefined") +set(PROJECT_SRC_ROOT ${CMAKE_CURRENT_LIST_DIR}/) +option(MINDSPORE_PATH "mindspore install path" "") +include_directories(${MINDSPORE_PATH}) +include_directories(${MINDSPORE_PATH}/include) +include_directories(${PROJECT_SRC_ROOT}) +find_library(MS_LIB libmindspore.so ${MINDSPORE_PATH}/lib) +file(GLOB_RECURSE MD_LIB ${MINDSPORE_PATH}/_c_dataengine*) + +add_executable(main src/main.cc src/utils.cc) +target_link_libraries(main ${MS_LIB} ${MD_LIB} gflags) diff --git a/model_zoo/official/cv/tinydarknet/ascend310_infer/build.sh b/model_zoo/official/cv/tinydarknet/ascend310_infer/build.sh new file mode 100644 index 00000000000..770a8851efa --- /dev/null +++ b/model_zoo/official/cv/tinydarknet/ascend310_infer/build.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +if [ ! -d out ]; then + mkdir out +fi +cd out || exit +cmake .. \ + -DMINDSPORE_PATH="`pip show mindspore-ascend | grep Location | awk '{print $2"/mindspore"}' | xargs realpath`" +make diff --git a/model_zoo/official/cv/tinydarknet/ascend310_infer/inc/utils.h b/model_zoo/official/cv/tinydarknet/ascend310_infer/inc/utils.h new file mode 100644 index 00000000000..efebe03a8c1 --- /dev/null +++ b/model_zoo/official/cv/tinydarknet/ascend310_infer/inc/utils.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#ifndef MINDSPORE_INFERENCE_UTILS_H_
+#define MINDSPORE_INFERENCE_UTILS_H_
+
+#include <sys/stat.h>
+#include <dirent.h>
+#include <vector>
+#include <string>
+#include <memory>
+#include "include/api/types.h"
+
+std::vector<std::string> GetAllFiles(std::string_view dirName);
+DIR *OpenDir(std::string_view dirName);
+std::string RealPath(std::string_view path);
+mindspore::MSTensor ReadFileToTensor(const std::string &file);
+int WriteResult(const std::string& imageFile, const std::vector<mindspore::MSTensor> &outputs);
+#endif
diff --git a/model_zoo/official/cv/tinydarknet/ascend310_infer/src/main.cc b/model_zoo/official/cv/tinydarknet/ascend310_infer/src/main.cc
new file mode 100644
index 00000000000..d1e12f5130a
--- /dev/null
+++ b/model_zoo/official/cv/tinydarknet/ascend310_infer/src/main.cc
@@ -0,0 +1,159 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <sys/time.h>
+#include <gflags/gflags.h>
+#include <dirent.h>
+#include <iostream>
+#include <string>
+#include <algorithm>
+#include <iosfwd>
+#include <vector>
+#include <fstream>
+#include <sstream>
+
+#include "include/api/model.h"
+#include "include/api/context.h"
+#include "include/api/types.h"
+#include "include/api/serialization.h"
+#include "include/dataset/vision_ascend.h"
+#include "include/dataset/execute.h"
+#include "include/dataset/vision.h"
+#include "inc/utils.h"
+
+using mindspore::Context;
+using mindspore::Serialization;
+using mindspore::Model;
+using mindspore::Status;
+using mindspore::ModelType;
+using mindspore::GraphCell;
+using mindspore::kSuccess;
+using mindspore::MSTensor;
+using mindspore::dataset::Execute;
+using mindspore::dataset::TensorTransform;
+using mindspore::dataset::vision::Resize;
+using mindspore::dataset::vision::HWC2CHW;
+using mindspore::dataset::vision::Normalize;
+using mindspore::dataset::vision::Decode;
+using mindspore::dataset::vision::CenterCrop;
+
+
+DEFINE_string(mindir_path, "", "mindir path");
+DEFINE_string(dataset_path, ".", "dataset path");
+DEFINE_int32(device_id, 0, "device id");
+DEFINE_string(aipp_path, "../../scripts/aipp.cfg", "aipp path");
+DEFINE_string(cpu_dvpp, "", "cpu or dvpp process");
+DEFINE_int32(image_height, 224, "image height");
+DEFINE_int32(image_width, 224, "image width");
+
+int main(int argc, char **argv) {
+  gflags::ParseCommandLineFlags(&argc, &argv, true);
+  if (RealPath(FLAGS_mindir_path).empty()) {
+    std::cout << "Invalid mindir" << std::endl;
+    return 1;
+  }
+
+  auto context = std::make_shared<Context>();
+  auto ascend310 = std::make_shared<mindspore::Ascend310DeviceInfo>();
+  ascend310->SetDeviceID(FLAGS_device_id);
+  ascend310->SetBufferOptimizeMode("off_optimize");
+  context->MutableDeviceInfo().push_back(ascend310);
+  mindspore::Graph graph;
+  Serialization::Load(FLAGS_mindir_path, ModelType::kMindIR, &graph);
+  if (FLAGS_cpu_dvpp == "DVPP") {
+    if (RealPath(FLAGS_aipp_path).empty()) {
+      std::cout << "Invalid aipp path" << std::endl;
+      return 1;
+    } else {
+      ascend310->SetInsertOpConfigPath(FLAGS_aipp_path);
+    }
+  }
+
+  Model model;
+  Status ret = model.Build(GraphCell(graph), context);
+  if (ret != kSuccess) {
+    std::cout << "ERROR: Build failed." << std::endl;
+    return 1;
+  }
+
+  std::shared_ptr<TensorTransform> decode(new Decode());
+  std::shared_ptr<TensorTransform> resize(new Resize({256}));
+  std::shared_ptr<TensorTransform> dvpp_resize(new Resize({256, 256}));
+  auto crop_size = {FLAGS_image_height, FLAGS_image_width};
+  std::shared_ptr<TensorTransform> center_crop(new CenterCrop(crop_size));
+  std::shared_ptr<TensorTransform> normalize(new Normalize({123.675, 116.28, 103.53},
+                                                           {58.395, 57.120, 57.375}));
+  std::shared_ptr<TensorTransform> hwc2chw(new HWC2CHW());
+  Execute transform({decode, resize, center_crop, normalize, hwc2chw});
+  Execute dvpptransform({decode, dvpp_resize});
+
+  auto all_files = GetAllFiles(FLAGS_dataset_path);
+  std::map<double, double> costTime_map;
+  size_t size = all_files.size();
+
+  for (size_t i = 0; i < size; ++i) {
+    struct timeval start = {0};
+    struct timeval end = {0};
+    double startTimeMs;
+    double endTimeMs;
+    std::vector<MSTensor> inputs;
+    std::vector<MSTensor> outputs;
+    std::cout << "Start predict input files:" << all_files[i] << std::endl;
+    if (FLAGS_cpu_dvpp == "DVPP") {
+      auto imgDvpp = std::make_shared<MSTensor>();
+      dvpptransform(ReadFileToTensor(all_files[i]), imgDvpp.get());
+      inputs.emplace_back(imgDvpp->Name(), imgDvpp->DataType(), imgDvpp->Shape(),
+                          imgDvpp->Data().get(), imgDvpp->DataSize());
+    } else {
+      auto img = MSTensor();
+      auto image = ReadFileToTensor(all_files[i]);
+      transform(image, &img);
+      std::vector<MSTensor> model_inputs = model.GetInputs();
+      inputs.emplace_back(model_inputs[0].Name(), model_inputs[0].DataType(), model_inputs[0].Shape(),
+                          img.Data().get(), img.DataSize());
+    }
+
+    gettimeofday(&start, nullptr);
+    ret = model.Predict(inputs, &outputs);
+    gettimeofday(&end, nullptr);
+    if (ret != kSuccess) {
+      std::cout << "Predict " << all_files[i] << " failed." << std::endl;
+      return 1;
+    }
+    startTimeMs = (1.0 * start.tv_sec * 1000000 + start.tv_usec) / 1000;
+    endTimeMs = (1.0 * end.tv_sec * 1000000 + end.tv_usec) / 1000;
+    costTime_map.insert(std::pair<double, double>(startTimeMs, endTimeMs));
+    WriteResult(all_files[i], outputs);
+  }
+  double average = 0.0;
+  int inferCount = 0;
+
+  for (auto iter = costTime_map.begin(); iter != costTime_map.end(); iter++) {
+    double diff = 0.0;
+    diff = iter->second - iter->first;
+    average += diff;
+    inferCount++;
+  }
+  average = average / inferCount;
+  std::stringstream timeCost;
+  timeCost << "NN inference cost average time: " << average << " ms of infer_count " << inferCount << std::endl;
+  std::cout << "NN inference cost average time: " << average << " ms of infer_count " << inferCount << std::endl;
+  std::string fileName = "./time_Result" + std::string("/test_perform_static.txt");
+  std::ofstream fileStream(fileName.c_str(), std::ios::trunc);
+  fileStream << timeCost.str();
+  fileStream.close();
+  costTime_map.clear();
+  return 0;
+}
diff --git a/model_zoo/official/cv/tinydarknet/ascend310_infer/src/utils.cc b/model_zoo/official/cv/tinydarknet/ascend310_infer/src/utils.cc
new file mode 100644
index 00000000000..cc5e872a937
--- /dev/null
+++ b/model_zoo/official/cv/tinydarknet/ascend310_infer/src/utils.cc
@@ -0,0 +1,130 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "inc/utils.h"
+
+#include <fstream>
+#include <algorithm>
+#include <iostream>
+
+using mindspore::MSTensor;
+using mindspore::DataType;
+
+std::vector<std::string> GetAllFiles(std::string_view dirName) {
+  struct dirent *filename;
+  DIR *dir = OpenDir(dirName);
+  if (dir == nullptr) {
+    return {};
+  }
+  std::vector<std::string> res;
+  while ((filename = readdir(dir)) != nullptr) {
+    std::string dName = std::string(filename->d_name);
+    if (dName == "." || dName == ".." || filename->d_type != DT_REG) {
+      continue;
+    }
+    res.emplace_back(std::string(dirName) + "/" + filename->d_name);
+  }
+  std::sort(res.begin(), res.end());
+  for (auto &f : res) {
+    std::cout << "image file: " << f << std::endl;
+  }
+  return res;
+}
+
+int WriteResult(const std::string& imageFile, const std::vector<MSTensor> &outputs) {
+  std::string homePath = "./result_Files";
+  for (size_t i = 0; i < outputs.size(); ++i) {
+    size_t outputSize;
+    std::shared_ptr<const void> netOutput;
+    netOutput = outputs[i].Data();
+    outputSize = outputs[i].DataSize();
+    int pos = imageFile.rfind('/');
+    std::string fileName(imageFile, pos + 1);
+    fileName.replace(fileName.find('.'), fileName.size() - fileName.find('.'), ".bin");
+    std::string outFileName = homePath + "/" + fileName;
+    FILE * outputFile = fopen(outFileName.c_str(), "wb");
+    fwrite(netOutput.get(), outputSize, sizeof(char), outputFile);
+    fclose(outputFile);
+    outputFile = nullptr;
+  }
+  return 0;
+}
+
+mindspore::MSTensor ReadFileToTensor(const std::string &file) {
+  if (file.empty()) {
+    std::cout << "Pointer file is nullptr" << std::endl;
+    return mindspore::MSTensor();
+  }
+
+  std::ifstream ifs(file);
+  if (!ifs.good()) {
+    std::cout << "File: " << file << " does not exist" << std::endl;
+    return mindspore::MSTensor();
+  }
+
+  if (!ifs.is_open()) {
+    std::cout << "File: " << file << " open failed" << std::endl;
+    return mindspore::MSTensor();
+  }
+
+  ifs.seekg(0, std::ios::end);
+  size_t size = ifs.tellg();
+  mindspore::MSTensor buffer(file, mindspore::DataType::kNumberTypeUInt8, {static_cast<int64_t>(size)}, nullptr, size);
+
+  ifs.seekg(0, std::ios::beg);
+  ifs.read(reinterpret_cast<char *>(buffer.MutableData()), size);
+  ifs.close();
+
+  return buffer;
+}
+
+
+DIR *OpenDir(std::string_view dirName) {
+  if (dirName.empty()) {
+    std::cout << " dirName is null ! " << std::endl;
+    return nullptr;
+  }
+  std::string realPath = RealPath(dirName);
+  struct stat s;
+  lstat(realPath.c_str(), &s);
+  if (!S_ISDIR(s.st_mode)) {
+    std::cout << "dirName is not a valid directory !"
<< std::endl; + return nullptr; + } + DIR *dir; + dir = opendir(realPath.c_str()); + if (dir == nullptr) { + std::cout << "Can not open dir " << dirName << std::endl; + return nullptr; + } + std::cout << "Successfully opened the dir " << dirName << std::endl; + return dir; +} + +std::string RealPath(std::string_view path) { + char realPathMem[PATH_MAX] = {0}; + char *realPathRet = nullptr; + realPathRet = realpath(path.data(), realPathMem); + + if (realPathRet == nullptr) { + std::cout << "File: " << path << " is not exist."; + return ""; + } + + std::string realPath(realPathMem); + std::cout << path << " realpath is: " << realPath << std::endl; + return realPath; +} diff --git a/model_zoo/official/cv/tinydarknet/export.py b/model_zoo/official/cv/tinydarknet/export.py index 4dd8545985d..56bfe98871d 100644 --- a/model_zoo/official/cv/tinydarknet/export.py +++ b/model_zoo/official/cv/tinydarknet/export.py @@ -24,12 +24,15 @@ from mindspore import Tensor from mindspore.train.serialization import load_checkpoint, load_param_into_net, export from src.config import imagenet_cfg -from src.tinydarknet import TinydarkNet +from src.tinydarknet import TinyDarkNet if __name__ == '__main__': parser = argparse.ArgumentParser(description='Classification') parser.add_argument('--dataset_name', type=str, default='imagenet', choices=['imagenet', 'cifar10'], help='dataset name.') + parser.add_argument('--file_format', type=str, default='AIR', choices=['MINDIR', 'AIR'], + help='file format.') + parser.add_argument('--file_name', type=str, default='tinydarknet', help='output file name.') args_opt = parser.parse_args() if args_opt.dataset_name == 'imagenet': @@ -37,12 +40,11 @@ if __name__ == '__main__': else: raise ValueError("dataset is not support.") - net = TinydarkNet(num_classes=cfg.num_classes) + net = TinyDarkNet(num_classes=cfg.num_classes) assert cfg.checkpoint_path is not None, "cfg.checkpoint_path is None." param_dict = load_checkpoint(cfg.checkpoint_path) load_param_into_net(net, param_dict) input_arr = Tensor(np.random.uniform(0.0, 1.0, size=[1, 3, 224, 224]), ms.float32) - export(net, input_arr, file_name=cfg.onnx_filename, file_format="ONNX") - export(net, input_arr, file_name=cfg.air_filename, file_format="AIR") + export(net, input_arr, file_name=args_opt.file_name, file_format=args_opt.file_format) diff --git a/model_zoo/official/cv/tinydarknet/postprocess.py b/model_zoo/official/cv/tinydarknet/postprocess.py new file mode 100644 index 00000000000..c08e5747d95 --- /dev/null +++ b/model_zoo/official/cv/tinydarknet/postprocess.py @@ -0,0 +1,72 @@ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +"""post process for 310 inference""" +import os +import argparse +import numpy as np + +parser = argparse.ArgumentParser(description='tinydarknet calcul top1 and top5 acc') +parser.add_argument("--result_path", type=str, required=True, default='', help="result file path") +parser.add_argument("--label_file", type=str, required=True, default='', help="label file") +args = parser.parse_args() + + +def get_top5_acc(top_arg, gt_class): + sub_count = 0 + for top5, gt in zip(top_arg, gt_class): + if gt in top5: + sub_count += 1 + return sub_count + + +def read_label(label_file): + with open(label_file, 'r') as f: + lines = f.readlines() + img_dict = {} + for line in lines: + img_id = line.split(':')[0] + label = line.split(':')[1] + img_dict[img_id] = label + return img_dict + + +def cal_acc(result_path, label_file): + img_label = read_label(label_file) + img_tot = 0 + top1_correct = 0 + top5_correct = 0 + result_shape = (1, 1000) + files = os.listdir(result_path) + for file in files: + full_file_path = os.path.join(result_path, file) + if os.path.isfile(full_file_path): + result = np.fromfile(full_file_path, dtype=np.float32).reshape(result_shape) + gt_classes = int(img_label[file.split('.')[0]]) + + top1_output = np.argmax(result, (-1)) + top5_output = np.argsort(result)[:, -5:] + + t1_correct = np.equal(top1_output, gt_classes).sum() + top1_correct += t1_correct + top5_correct += get_top5_acc(top5_output, [gt_classes]) + img_tot += 1 + acc1 = 100 * top1_correct / img_tot + acc5 = 100 * top5_correct / img_tot + print('total={}, top1_correct={}, acc={:.2f}%'.format(img_tot, top1_correct, acc1)) + print('total={}, top5_correct={}, acc={:.2f}%'.format(img_tot, top5_correct, acc5)) + + +if __name__ == '__main__': + cal_acc(args.result_path, args.label_file) diff --git a/model_zoo/official/cv/tinydarknet/scripts/run_infer_310.sh b/model_zoo/official/cv/tinydarknet/scripts/run_infer_310.sh new file mode 100644 index 00000000000..ef7934e7b43 --- /dev/null +++ b/model_zoo/official/cv/tinydarknet/scripts/run_infer_310.sh @@ -0,0 +1,108 @@ +#!/bin/bash +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ + +if [[ $# -lt 4 || $# -gt 5 ]]; then + echo "Usage: bash run_infer_310.sh [MINDIR_PATH] [DATA_PATH] [LABEL_FILE] [DVPP] [DEVICE_ID] + DVPP is mandatory, and must choose from [DVPP|CPU], it's case-insensitive + DEVICE_ID is optional, it can be set by environment variable device_id, otherwise the value is zero" +exit 1 +fi + +get_real_path(){ + if [ "${1:0:1}" == "/" ]; then + echo "$1" + else + echo "$(realpath -m $PWD/$1)" + fi +} +model=$(get_real_path $1) +data_path=$(get_real_path $2) +label_file=$(get_real_path $3) +DVPP=${4^^} + +device_id=0 +if [ $# == 5 ]; then + device_id=$5 +fi + +echo "mindir name: "$model +echo "dataset path: "$data_path +echo "label file: "$label_file +echo "image process mode: "$DVPP +echo "device id: "$device_id + +export ASCEND_HOME=/usr/local/Ascend/ +if [ -d ${ASCEND_HOME}/ascend-toolkit ]; then + export PATH=$ASCEND_HOME/ascend-toolkit/latest/fwkacllib/ccec_compiler/bin:$ASCEND_HOME/ascend-toolkit/latest/atc/bin:$PATH + export LD_LIBRARY_PATH=/usr/local/lib:$ASCEND_HOME/ascend-toolkit/latest/atc/lib64:$ASCEND_HOME/ascend-toolkit/latest/fwkacllib/lib64:$ASCEND_HOME/driver/lib64:$ASCEND_HOME/add-ons:$LD_LIBRARY_PATH + export TBE_IMPL_PATH=$ASCEND_HOME/ascend-toolkit/latest/opp/op_impl/built-in/ai_core/tbe + export PYTHONPATH=${TBE_IMPL_PATH}:$ASCEND_HOME/ascend-toolkit/latest/fwkacllib/python/site-packages:$PYTHONPATH + export ASCEND_OPP_PATH=$ASCEND_HOME/ascend-toolkit/latest/opp +else + export PATH=$ASCEND_HOME/atc/ccec_compiler/bin:$ASCEND_HOME/atc/bin:$PATH + export LD_LIBRARY_PATH=/usr/local/lib:$ASCEND_HOME/atc/lib64:$ASCEND_HOME/acllib/lib64:$ASCEND_HOME/driver/lib64:$ASCEND_HOME/add-ons:$LD_LIBRARY_PATH + export PYTHONPATH=$ASCEND_HOME/atc/python/site-packages:$PYTHONPATH + export ASCEND_OPP_PATH=$ASCEND_HOME/opp +fi + +function compile_app() +{ + cd ../ascend310_infer || exit + bash build.sh &> build.log +} + +function infer() +{ + cd - || exit + if [ -d result_Files ]; then + rm -rf ./result_Files + fi + if [ -d time_Result ]; then + rm -rf ./time_Result + fi + mkdir result_Files + mkdir time_Result + if [ "$DVPP" == "DVPP" ];then + echo "TinyDarknet CURRENTLY ONLY SUPPORT CPU MODE" + exit 1 + elif [ "$DVPP" == "CPU" ]; then + ../ascend310_infer/out/main --mindir_path=$model --dataset_path=$data_path --cpu_dvpp=$DVPP --device_id=$device_id --image_height=224 --image_width=224 &> infer.log + else + echo "image process mode must be in [DVPP|CPU]" + exit 1 + fi +} + +function cal_acc() +{ + python3.7 ../postprocess.py --result_path=./result_Files --label_file=$label_file &> acc.log & +} + +compile_app +if [ $? -ne 0 ]; then + echo "compile app code failed" + exit 1 +fi +infer +if [ $? -ne 0 ]; then + echo " execute inference failed" + exit 1 +fi +cal_acc +if [ $? -ne 0 ]; then + echo "calculate accuracy failed" + exit 1 +fi \ No newline at end of file