[模型训练] 支持r1.3分支ssd sdk推理

[修改人] haoripei
[审核人] chenshushu
This commit is contained in:
Atlas_hrp 2021-09-01 10:13:44 +08:00
parent a620b2a0ae
commit 18ffe226ba
7 changed files with 993 additions and 0 deletions

View File

@ -0,0 +1,13 @@
# Copyright (C) 2021.Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@ -0,0 +1,166 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import argparse
import json
import os
from StreamManagerApi import MxDataInput
from StreamManagerApi import StreamManagerApi
# Accepted image suffixes; matching is case-sensitive, so both lower- and
# upper-case variants are listed explicitly.
SUPPORT_IMG_SUFFIX = (".jpg", ".JPG", ".jpeg", ".JPEG")
# Command-line options; "@file" argument files are supported via
# fromfile_prefix_chars.
parser = argparse.ArgumentParser(
    description="SSD MobileNet V1 FPN infer " "example.",
    fromfile_prefix_chars="@",
)
parser.add_argument(
    "--pipeline_path",
    type=str,
    help="mxManufacture pipeline file path",
    default="./conf/ssd_mobilenet_fpn_ms_mc.pipeline",
)
parser.add_argument(
    "--stream_name",
    type=str,
    help="Infer stream name in the pipeline config file",
    default="detection",
)
parser.add_argument(
    "--img_path",
    type=str,
    help="Image pathname, can be a image file or image directory",
    default="./test_img",
)
parser.add_argument(
    "--res_path",
    type=str,
    help="Directory to store the inferred result",
    default=None,
    required=False,
)
args = parser.parse_args()
def infer():
    """Infer images by DVPP + OM through the SDK stream.

    Reads images from --img_path (a single image file or a directory),
    sends each one to the stream named --stream_name created from the
    pipeline at --pipeline_path, and writes all detections as a JSON list
    to <res_path>/det_result.json. Exits the process on any SDK error.
    """
    pipeline_path = args.pipeline_path
    stream_name = args.stream_name.encode()
    img_path = os.path.abspath(args.img_path)
    res_dir_name = args.res_path
    stream_manager_api = StreamManagerApi()
    ret = stream_manager_api.InitManager()
    if ret != 0:
        print("Failed to init Stream manager, ret=%s" % str(ret))
        exit()
    # Create streams by pipeline config file.
    with open(pipeline_path, "rb") as f:
        pipeline_str = f.read()
    ret = stream_manager_api.CreateMultipleStreams(pipeline_str)
    if ret != 0:
        print("Failed to create Stream, ret=%s" % str(ret))
        exit()
    in_plugin_id = 0
    # Construct the input of the stream.
    data_input = MxDataInput()
    # Build the worklist. The original implementation fell through to
    # os.listdir() when img_path was a plain file with an unsupported
    # suffix, which raised NotADirectoryError; reject such input instead.
    if os.path.isfile(img_path):
        if not img_path.endswith(SUPPORT_IMG_SUFFIX):
            print(f"Unsupported image suffix:{img_path}")
            exit()
        file_list = [img_path]
    elif os.path.isdir(img_path):
        file_list = [
            os.path.join(img_path, img)
            for img in os.listdir(img_path)
            if img.endswith(SUPPORT_IMG_SUFFIX)
        ]
    else:
        print(f"img_path does not exist:{img_path}")
        exit()
    if not res_dir_name:
        res_dir_name = os.path.join(".", "infer_res")
    print(f"res_dir_name={res_dir_name}")
    os.makedirs(res_dir_name, exist_ok=True)
    pic_infer_dict_list = []
    for file_name in file_list:
        with open(file_name, "rb") as f:
            img_data = f.read()
        if not img_data:
            print(f"read empty data from img:{file_name}")
            continue
        data_input.data = img_data
        unique_id = stream_manager_api.SendDataWithUniqueId(
            stream_name, in_plugin_id, data_input
        )
        if unique_id < 0:
            print("Failed to send data to stream.")
            exit()
        # Block up to 3 seconds waiting for this image's result.
        infer_result = stream_manager_api.GetResultWithUniqueId(
            stream_name, unique_id, 3000
        )
        if infer_result.errorCode != 0:
            print(
                "GetResultWithUniqueId error. errorCode=%d, errorMsg=%s"
                % (infer_result.errorCode, infer_result.data.decode())
            )
            exit()
        pic_infer_dict_list.extend(
            parse_img_infer_result(file_name, infer_result)
        )
        print(f"Inferred image:{file_name} success!")
    with open(os.path.join(res_dir_name, "det_result.json"), "w") as fw:
        fw.write(json.dumps(pic_infer_dict_list))
    stream_manager_api.DestroyAllStreams()
def parse_img_infer_result(file_name, infer_result):
    """Convert one image's SDK infer result into COCO-style detection dicts.

    :param file_name: path of the inferred image; its basename up to the
        first '.' is used as the COCO image id when it is numeric.
    :param infer_result: SDK result object whose ``data`` attribute holds a
        JSON byte string with an optional "MxpiObject" list.
    :return: list of dicts with keys image_id, bbox ([x, y, w, h] in integer
        pixels), category_id and score — one dict per detected object.
    """
    obj_list = json.loads(infer_result.data.decode()).get("MxpiObject", [])
    # The image id is the same for every object: compute it once, not per
    # detection as the original did.
    img_fname_without_suffix = os.path.basename(file_name).split(".")[0]
    try:
        image_id = int(img_fname_without_suffix)
    except ValueError:
        # Non-numeric file names (non-COCO data) keep the raw stem as id.
        # The original used a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit.
        print("exception getting image id.")
        image_id = img_fname_without_suffix
    det_obj_list = []
    for o in obj_list:
        x0 = round(o.get("x0"), 4)
        y0 = round(o.get("y0"), 4)
        x1 = round(o.get("x1"), 4)
        y1 = round(o.get("y1"), 4)
        # COCO mAP expects [x, y, width, height] with integer pixels.
        bbox_for_map = [int(x0), int(y0), int(x1 - x0), int(y1 - y0)]
        class_info = o.get("classVec")[0]
        det_obj_list.append(
            dict(
                image_id=image_id,
                bbox=bbox_for_map,
                category_id=class_info.get("classId"),
                score=class_info.get("confidence"),
            )
        )
    return det_obj_list
if __name__ == "__main__":
infer()

View File

@ -0,0 +1,13 @@
# Copyright (C) 2020.Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@ -0,0 +1,101 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import os
from datetime import datetime
from absl import flags
from absl import app
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
# One "%"-format line per metric, in the exact order COCOeval.summarize()
# fills cocoEval.stats (12 values: 6 AP entries then 6 AR entries).
PRINT_LINES_TEMPLATE = """
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = %.3f
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = %.3f
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = %.3f
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = %.3f
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = %.3f
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = %.3f
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = %.3f
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = %.3f
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = %.3f
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = %.3f
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = %.3f
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = %.3f
"""
FLAGS = flags.FLAGS
# Command-line flags for the mAP report generator.
flags.DEFINE_string(
    name="annotations_json",
    default=None,
    help="annotations_json file path name",
)
flags.DEFINE_string(
    name="det_result_json", default=None, help="det_result json file"
)
flags.DEFINE_enum(
    name="anno_type",
    default="bbox",
    enum_values=["segm", "bbox", "keypoints"],
    help="Annotation type",
)
flags.DEFINE_string(
    name="output_path_name",
    default=None,
    help="Where to out put the result files.",
)
flags.mark_flag_as_required("annotations_json")
flags.mark_flag_as_required("det_result_json")
flags.mark_flag_as_required("output_path_name")
def main(unused_arg):
    """Compute COCO mAP metrics and append them to FLAGS.output_path_name.

    Loads the ground-truth annotations and detection results, runs the
    standard COCOeval bbox/segm/keypoints evaluation, and writes the 12
    summary metric lines (formatted via PRINT_LINES_TEMPLATE) to the
    report file, framed by start/end timestamps.
    """
    del unused_arg
    out_put_dir = os.path.dirname(FLAGS.output_path_name)
    # dirname() is "" when the output path has no directory component;
    # the original unconditionally called makedirs on it, which raises
    # FileNotFoundError for "". exist_ok also removes the exists/create race.
    if out_put_dir:
        os.makedirs(out_put_dir, exist_ok=True)
    # `with` guarantees the report file is closed even if COCO evaluation
    # raises (the original leaked the handle in that case).
    with open(FLAGS.output_path_name, "a+") as fw:
        now_time_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        head_info = f"{'-'*50}mAP Test starts @ {now_time_str}{'-'*50}\n"
        fw.write(head_info)
        fw.flush()
        cocoGt = COCO(FLAGS.annotations_json)
        cocoDt = cocoGt.loadRes(FLAGS.det_result_json)
        cocoEval = COCOeval(cocoGt, cocoDt, FLAGS.anno_type)
        cocoEval.params.imgIds = sorted(cocoGt.getImgIds())
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
        format_lines = [
            line for line in PRINT_LINES_TEMPLATE.splitlines() if line.strip()
        ]
        # cocoEval.stats holds the 12 metrics in the same order as the
        # template lines.
        for i, line in enumerate(format_lines):
            fw.write(line % cocoEval.stats[i] + "\n")
        end_time_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        tail_info = f"{'-'*50}mAP Test ends @ {end_time_str}{'-'*50}\n"
        fw.write(tail_info)
if __name__ == "__main__":
app.run(main)

View File

@ -0,0 +1,630 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import json
import os
import threading
import time
from datetime import datetime
from threading import Lock
import MxpiDataType_pb2 as MxpiDataType
import cv2
from StreamManagerApi import InProtobufVector
from StreamManagerApi import MxDataInput
from StreamManagerApi import MxProtobufIn
from StreamManagerApi import StreamManagerApi
from StreamManagerApi import StringVector
from absl import app
from absl import flags
# Output locations; filled in by main() from FLAGS.output_dir.
BOXED_IMG_DIR = None
TXT_DIR = None
PERF_REPORT_TXT = None
DET_RESULT_RESIZED_JSON = None
DET_RESULT_JSON = None
FLAGS = flags.FLAGS
# Guards concurrent appends to the shared detection-result list from the
# per-image parser threads.
infer_ret_list_lock = Lock()
# img_id -> (height_ratio, width_ratio) used to map predicted boxes back to
# the original image resolution.
det_restore_ratio = dict()
flags.DEFINE_string(
    name="img_dir", default=None, help="Directory of images to infer"
)
flags.DEFINE_string(
    name="pipeline_config",
    default=None,
    help="Path name of pipeline configuration file of " "mxManufacture.",
)
flags.DEFINE_string(
    name="infer_stream_name",
    default=None,
    help="Infer stream name configured in pipeline "
    "configuration file of mxManufacture",
)
flags.DEFINE_boolean(
    name="draw_box",
    default=True,
    help="Whether out put the inferred image with bounding box",
)
flags.DEFINE_enum(
    name="preprocess",
    default="OPENCV",
    enum_values=["DVPP", "OPENCV"],
    help="Preprocess method to use, default OpenCV.",
)
flags.DEFINE_boolean(
    name="coco",
    default=True,
    help="Whether use coco dataset to test performance.",
)
flags.DEFINE_float(
    name="score_thresh_for_draw",
    default=0.5,
    help="Draw bounding box if the confidence greater than.",
)
flags.DEFINE_string(
    name="output_dir",
    default=None,
    help="Where to out put the inferred image with bounding box, if the "
    "draw_box is set, this parameter must be set.",
)
flags.DEFINE_integer(
    name="how_many_images_to_infer",
    default=-1,
    help="Infer how many images in img_dir, -1 means all.",
)
flags.DEFINE_integer(
    name="infer_timeout_secs",
    default=3,
    help="Time out(in seconds) to get the infer result. ",
)
flags.DEFINE_integer(
    name="model_input_height",
    default=640,
    help="Image height input to " "model.",
)
flags.DEFINE_integer(
    name="model_input_width", default=640, help="Image width input to model."
)
flags.DEFINE_integer(
    name="display_step",
    default=100,
    help="Every how many images to print the inference real speed and "
    "progress.",
)
flags.mark_flag_as_required("img_dir")
flags.mark_flag_as_required("pipeline_config")
flags.mark_flag_as_required("infer_stream_name")
flags.mark_flag_as_required("output_dir")
def draw_image(input_image, bboxes, output_img):
    """Draw detection bounding boxes on an image and save the result.

    :param input_image: path of the source image.
    :param bboxes: iterable of [x0, y0, x1, y1] boxes (any numeric type).
    :param output_img: path where the annotated image is written.
    """
    image = cv2.imread(input_image)
    # Ten distinct BGR colors, cycled so that adjacent boxes differ.
    color_index_dict = {
        0: (0, 0, 255),
        1: (0, 255, 0),
        2: (255, 0, 0),
        3: (255, 255, 0),
        4: (255, 0, 255),
        5: (0, 255, 255),
        6: (255, 128, 0),
        7: (128, 128, 255),
        8: (0, 255, 128),
        9: (128, 128, 0),
    }
    for index, bbox in enumerate(bboxes):
        color = color_index_dict.get(index % 10)
        # OpenCV requires integer pixel coordinates.
        x0, y0, x1, y1 = (int(cor) for cor in bbox)
        cv2.rectangle(image, (x0, y0), (x1, y1), color, 2)
    cv2.imwrite(output_img, image)
def draw_img_fun(img_id, bboxes):
    """Draw *bboxes* on the COCO image *img_id* and save it to BOXED_IMG_DIR."""
    # COCO val images are named by their zero-padded 12-digit numeric id.
    file_name = "%012d.jpg" % img_id
    source = os.path.join(FLAGS.img_dir, file_name)
    target = os.path.join(BOXED_IMG_DIR, file_name)
    draw_image(source, bboxes, target)
def trans_class_id(k):
    """Map a contiguous model class id (1-80) to the sparse COCO id (1-90).

    The COCO 2017 annotation ids skip several numbers, so the model's
    contiguous ids must be shifted by a per-range offset.

    :param k: model class id, expected in [1, 80].
    :return: the corresponding COCO category id, or None when *k* is out of
        range (matching the original implicit None fall-through).
    """
    # (first_id, last_id, offset): contiguous runs sharing one offset.
    id_ranges = (
        (1, 11, 0),
        (12, 24, 1),
        (25, 26, 2),
        (27, 40, 4),
        (41, 60, 5),
        (61, 61, 6),
        (62, 62, 8),
        (63, 73, 9),
        (74, 80, 10),
    )
    for low, high, offset in id_ranges:
        if low <= k <= high:
            return k + offset
    return None
def parse_result(img_id, json_content):
    """Parse one image's detection JSON into COCO-style dicts, plus side
    effects: writes a per-image text file of raw detections to TXT_DIR and,
    when --draw_box is set, saves the image with boxes drawn.

    :param img_id: numeric COCO image id (also used for output file names).
    :param json_content: JSON string with an optional "MxpiObject" list.
    :return: list of dicts (image_id, bbox, category_id, score) for mAP.
    """
    obj_list = json.loads(json_content).get("MxpiObject", [])
    pic_infer_dict_list = []
    bboxes_for_drawing = []
    txt_lines_list = []
    # Ratios restore boxes from the resized model input back to the
    # original resolution; default (1, 1) means no resize was recorded.
    hratio, wratio = det_restore_ratio.get(img_id, (1, 1))
    for o in obj_list:
        x0, y0, x1, y1 = (
            round(o.get("x0"), 4),
            round(o.get("y0"), 4),
            round(o.get("x1"), 4),
            round(o.get("y1"), 4),
        )
        # For MAP: [x, y, width, height] scaled to the original image size.
        bbox_for_map = [
            int(x0 * wratio),
            int(y0 * hratio),
            int((x1 - x0) * wratio),
            int((y1 - y0) * hratio),
        ]
        # For drawing bounding box (model-input coordinates, unscaled).
        bbox_for_drawing = [int(x0), int(y0), int(x1), int(y1)]
        # One whitespace-separated line per detection for the .txt dump.
        tmp_list = [
            o.get("classVec")[0].get("classId"),
            o.get("classVec")[0].get("confidence"),
            x0,
            y0,
            x1,
            y1,
        ]
        tmp_list = map(str, tmp_list)
        txt_lines_list.append(" ".join(tmp_list))
        category_id = o.get("classVec")[0].get("classId")  # 1-80, GT:1-90
        category_id = trans_class_id(category_id)
        score = o.get("classVec")[0].get("confidence")
        pic_infer_dict_list.append(
            dict(
                image_id=img_id,
                bbox=bbox_for_map,
                category_id=category_id,
                score=score,
            )
        )
        if FLAGS.draw_box and score > FLAGS.score_thresh_for_draw:
            bboxes_for_drawing.append(bbox_for_drawing[:])
    # COCO-style zero-padded file name for the raw detection dump.
    txt_name = "%012d.txt" % img_id
    txt_full_name = os.path.join(TXT_DIR, txt_name)
    with open(txt_full_name, "w") as fw:
        fw.write("\n".join(txt_lines_list))
        fw.write("\n")
    if FLAGS.draw_box:
        draw_img_fun(img_id, bboxes_for_drawing)
    return pic_infer_dict_list
def send_img_with_opencv_handled(stream_manager_api, img_file_name):
    """Preprocess one image with OpenCV and send it into the infer stream.

    Reads the image, converts BGR->RGB, resizes it to the model input size,
    records the restore ratios in det_restore_ratio for later bbox
    rescaling, wraps the raw pixel bytes in an MxpiVisionList protobuf and
    sends it to plugin "appsrc0" of the configured stream.

    :param stream_manager_api: initialised StreamManagerApi instance.
    :param img_file_name: path of the image file to send.
    :return: unique id from SendProtobuf; exits the process on send error.
    """
    img = cv2.imread(img_file_name)
    height = img.shape[0]
    width = img.shape[1]
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, (FLAGS.model_input_width, FLAGS.model_input_height))
    # COCO images are named by their numeric id; otherwise the full file
    # name serves as the key.
    img_id = (
        int(os.path.basename(img_file_name).split(".")[0])
        if FLAGS.coco
        else img_file_name
    )
    # Ratios to map boxes predicted on the resized image back to the
    # original resolution: original_size / model_input_size.
    det_restore_ratio[img_id] = (
        round(height * 1.0 / FLAGS.model_input_height, 4),
        round(width * 1.0 / FLAGS.model_input_width, 4),
    )
    protobuf_vec = InProtobufVector()
    vision_list = MxpiDataType.MxpiVisionList()
    vision_vec = vision_list.visionVec.add()
    # NOTE(review): format=1 presumably selects the RGB pixel format —
    # confirm against the SDK's MxpiVisionInfo enum.
    vision_vec.visionInfo.format = 1
    vision_vec.visionInfo.width = FLAGS.model_input_width
    vision_vec.visionInfo.height = FLAGS.model_input_height
    vision_vec.visionInfo.widthAligned = FLAGS.model_input_width
    vision_vec.visionInfo.heightAligned = FLAGS.model_input_height
    vision_vec.visionData.deviceId = 0
    vision_vec.visionData.memType = 0
    # The original routed the bytes through an unused MxDataInput wrapper;
    # the raw buffer is all that is needed here.
    vision_vec.visionData.dataStr = img.tobytes()
    protobuf = MxProtobufIn()
    protobuf.key = b"appsrc0"
    protobuf.type = b"MxTools.MxpiVisionList"
    protobuf.protobuf = vision_list.SerializeToString()
    protobuf_vec.push_back(protobuf)
    unique_id = stream_manager_api.SendProtobuf(
        FLAGS.infer_stream_name.encode("utf8"), 0, protobuf_vec
    )
    if unique_id < 0:
        print("Failed to send data to stream.")
        exit()
    # Dead code removed: the original also built a StringVector key_vec
    # that was never used.
    return unique_id
def display_infer_progress(img_num, index, report_file, start_secs):
    """Print the running inference speed and log it asynchronously."""
    elapsed = time.time() - start_secs
    done = index + 1
    acc_secs = round(elapsed, 4)
    real_speed = round(elapsed * 1000 / done, 4)
    perf_detail = (
        f"Inferred: {done}/{img_num} images; "
        f"took: {acc_secs} seconds; "
        f"average inference speed at: {real_speed} ms/image\n"
    )
    print(perf_detail)
    # Write in a background thread so logging does not slow the infer loop.
    threading.Thread(
        target=write_speed_detail, args=(perf_detail, report_file)
    ).start()
def write_speed_detail(perf_detail, report_file):
    """Append one speed-report line and flush so progress is visible live."""
    report_file.write(perf_detail)
    report_file.flush()
def handle_infer_result(
    all_infer_dict_list, img_id, infer_result, img_ext="jpg"
):
    """Parse one infer result and append its detections to the shared list.

    Runs on a worker thread; the shared list is protected by
    infer_ret_list_lock. Exits the process on an SDK error result.
    """
    if infer_result.errorCode != 0:
        error_msg = (
            "GetResultWithUniqueId error. errorCode=%d, errorMsg=%s"
            % (infer_result.errorCode, infer_result.data.decode())
        )
        print(error_msg)
        exit()
    decoded_json = infer_result.data.decode()
    with infer_ret_list_lock:
        all_infer_dict_list.extend(parse_result(img_id, decoded_json))
def infer_imgs_in_dir_with_open_cv():
    """Infer every jpg/jpeg under FLAGS.img_dir using OpenCV preprocessing.

    For each image: send it (synchronously) to the stream, fetch the result,
    and hand parsing to a worker thread. Writes timing details to
    PERF_REPORT_TXT and the aggregated detections to DET_RESULT_JSON, then
    destroys the streams.
    """
    input_dir = FLAGS.img_dir
    # NOTE(review): handle is closed explicitly below; an exception in the
    # loop would leak it — consider a `with` block.
    report_file = open(PERF_REPORT_TXT, "a+")
    # Skip previously produced "*boxed*" output images.
    imgs = [
        img_name
        for img_name in os.listdir(input_dir)
        if "boxed" not in img_name
        and img_name.lower().endswith((".jpg", ".jpeg"))
    ]
    img_file_names = [
        os.path.join(input_dir, img_name)
        for img_name in imgs
        if "boxed" not in img_name  # redundant: imgs is already filtered
    ]
    all_infer_dict_list = []
    stream_manager_api = prepare_infer_stream()
    start_secs = time.time()
    img_num = len(img_file_names)
    parse_det_threads = []
    for index, img_file_name in enumerate(img_file_names):
        inferred_cnt = index + 1
        send_img_with_opencv_handled(stream_manager_api, img_file_name)
        # GetResult blocks until this image's result is available.
        infer_result = stream_manager_api.GetResult(
            FLAGS.infer_stream_name.encode("utf8"), 0
        )
        if inferred_cnt % FLAGS.display_step == 0:
            display_infer_progress(img_num, index, report_file, start_secs)
        name, ext = os.path.splitext(os.path.basename(img_file_name))
        img_id = int(name) if FLAGS.coco else name
        # Parse on a worker thread so the infer loop is not blocked by
        # file writing / box drawing.
        t = threading.Thread(
            target=handle_infer_result,
            args=(all_infer_dict_list, img_id, infer_result, ext),
        )
        t.start()
        parse_det_threads.append(t)
        # Optional early stop after --how_many_images_to_infer images.
        if inferred_cnt >= FLAGS.how_many_images_to_infer > 0:
            img_num = inferred_cnt
            print(f"Inferred all {inferred_cnt} images to SDK success.")
            break
    # Wait for all parser threads before aggregating and reporting.
    for t in parse_det_threads:
        t.join()
    finish_secs = time.time()
    avg_infer_speed = round((finish_secs - start_secs) * 1000 / img_num, 4)
    final_perf = (
        f"Infer with OPENCV finished, average speed:{avg_infer_speed} "
        f"ms/image for {img_num} images.\n\n"
    )
    print(final_perf)
    report_file.write(final_perf)
    report_file.close()
    with open(DET_RESULT_JSON, "w") as fw:
        fw.write(json.dumps(all_infer_dict_list))
    stream_manager_api.DestroyAllStreams()
def send_many_images(stream_manager_api):
    """Send image files from FLAGS.img_dir into the infer stream.

    :param stream_manager_api: initialised StreamManagerApi instance.
    :return: zip of (unique_id, image_id) pairs for the images sent, to be
        consumed by get_all_images_result().
    """
    input_dir = FLAGS.img_dir
    # Filter out previously produced "*boxed*" output images BEFORE deriving
    # the ids. The original computed img_ids from the unfiltered listing
    # while filtering the file names, so ids and uuids fell out of
    # alignment as soon as a boxed image was present in the directory.
    imgs = [
        img_name
        for img_name in os.listdir(input_dir)
        if "boxed" not in img_name
    ]
    img_ids = [int(img_name.split(".")[0]) for img_name in imgs]
    img_file_names = [os.path.join(input_dir, img_name) for img_name in imgs]
    infer_cnt = (
        len(img_file_names)
        if FLAGS.how_many_images_to_infer == -1
        else FLAGS.how_many_images_to_infer
    )
    start = time.time()
    uuid_list = []
    for img_file_name in img_file_names[:infer_cnt]:
        data_input = MxDataInput()
        with open(img_file_name, "rb") as f:
            data_input.data = f.read()
        in_plugin_id = 0
        unique_id = stream_manager_api.SendDataWithUniqueId(
            FLAGS.infer_stream_name.encode("utf8"), in_plugin_id, data_input
        )
        if unique_id < 0:
            print("Failed to send data to stream.")
            exit()
        uuid_list.append(unique_id)
    end = time.time()
    time_str = (
        f"\nSend all images data took: {round((end-start)*1000, 2)} ms\n"
    )
    print(time_str)
    with open(PERF_REPORT_TXT, "a+") as fw:
        fw.write(time_str)
    return zip(uuid_list, img_ids)
def get_all_images_result(uuid_img_id_zip, stream_manager_api):
    """Fetch and parse the infer result of every previously sent image.

    :param uuid_img_id_zip: iterable of (unique_id, image_id) pairs as
        returned by send_many_images().
    :param stream_manager_api: initialised StreamManagerApi instance.
    :return: list of COCO-style detection dicts for all images.
    """
    start_secs = time.time()
    all_infer_dict_list = []
    parse_threads = []
    report_file = open(PERF_REPORT_TXT, "a+")
    img_num = len(
        [
            img_name
            for img_name in os.listdir(FLAGS.img_dir)
            if "boxed" not in img_name
        ]
    )
    try:
        for index, (uuid, img_id) in enumerate(uuid_img_id_zip):
            infer_result = stream_manager_api.GetResultWithUniqueId(
                FLAGS.infer_stream_name.encode("utf8"),
                uuid,
                FLAGS.infer_timeout_secs * 1000,
            )
            if (index + 1) % FLAGS.display_step == 0:
                cur_secs = time.time()
                acc_secs = round(cur_secs - start_secs, 4)
                real_speed = round(
                    (cur_secs - start_secs) * 1000 / (index + 1), 4
                )
                perf_detail = (
                    f"Inferred: {index + 1}/{img_num} images; "
                    f"took: {acc_secs} seconds; "
                    f"average inference speed at: {real_speed} ms/image\n"
                )
                print(perf_detail)
                # Write synchronously: the original's fire-and-forget
                # thread could race with report_file being closed below.
                write_speed_detail(perf_detail, report_file)
            t = threading.Thread(
                target=parse_infer_result,
                args=(all_infer_dict_list, img_id, infer_result),
            )
            t.start()
            parse_threads.append(t)
        # Join every parser thread: the original returned without joining,
        # so the returned list could still be missing results.
        for t in parse_threads:
            t.join()
        finish_secs = time.time()
        avg_infer_speed = round(
            (finish_secs - start_secs) * 1000 / img_num, 4
        )
        final_perf = (
            f"Infer finished, average speed:{avg_infer_speed} "
            f"ms/image for {img_num} images.\n\n"
        )
        report_file.write(final_perf)
    finally:
        report_file.close()
    return all_infer_dict_list
# NOTE(review): duplicate of the write_speed_detail defined earlier in this
# file; Python keeps this later definition. The two bodies are identical, so
# behavior is unaffected, but one of them should be removed.
def write_speed_detail(perf_detail, report_file):
    # Append one speed line and flush so progress is visible immediately.
    report_file.write(perf_detail)
    report_file.flush()
def parse_infer_result(all_infer_dict_list, img_id, infer_result):
    """Parse one async infer result and collect its detections.

    Runs on a worker thread; exits the process on an SDK error result.
    """
    if infer_result.errorCode != 0:
        print(
            "GetResultWithUniqueId error. errorCode=%d, errorMsg=%s"
            % (infer_result.errorCode, infer_result.data.decode())
        )
        exit()
    detections = parse_result(img_id, infer_result.data.decode())
    # Only the shared-list extend needs the lock; parsing ran outside it.
    with infer_ret_list_lock:
        all_infer_dict_list.extend(detections)
def infer_img(stream_manager_api, input_image, infer_stream_name):
    """Infer one input image with specified stream name configured in
    mxManufacture pipeline config file.

    :param stream_manager_api: initialised StreamManagerApi instance.
    :param input_image: file name of the image to be inferred.
    :param infer_stream_name: stream name from the pipeline config.
    :return: list of COCO-style detection dicts from parse_result().
    """
    data_input = MxDataInput()
    with open(input_image, "rb") as f:
        data_input.data = f.read()
    in_plugin_id = 0
    start = time.time()
    unique_id = stream_manager_api.SendDataWithUniqueId(
        infer_stream_name.encode("utf8"), in_plugin_id, data_input
    )
    if unique_id < 0:
        print("Failed to send data to stream.")
        exit()
    # Obtain the inference result by specifying streamName and unique_id.
    infer_result = stream_manager_api.GetResultWithUniqueId(
        infer_stream_name.encode("utf8"), unique_id, 3000
    )
    end = time.time()
    # Bug fix: the original printed the absolute epoch timestamp (`end`)
    # instead of the elapsed send+infer duration.
    print(f"Infer time: {round(end - start, 4)} s.")
    if infer_result.errorCode != 0:
        print(
            "GetResultWithUniqueId error. errorCode=%d, errorMsg=%s"
            % (infer_result.errorCode, infer_result.data.decode())
        )
        exit()
    info_json_str = infer_result.data.decode()
    img_id = int(os.path.basename(input_image).split(".")[0])
    return parse_result(img_id, info_json_str)
def prepare_infer_stream():
    """Initialise the stream manager and build streams from the pipeline.

    Exits the process on any SDK error.

    :return: an initialised StreamManagerApi instance with streams created.
    """
    api = StreamManagerApi()
    ret = api.InitManager()
    if ret != 0:
        print("Failed to init Stream manager, ret=%s" % str(ret))
        exit()
    # Create streams from the pipeline configuration file.
    with open(FLAGS.pipeline_config, "rb") as config_file:
        pipeline_bytes = config_file.read()
    ret = api.CreateMultipleStreams(pipeline_bytes)
    if ret != 0:
        print("Failed to create Stream, ret=%s" % str(ret))
        exit()
    return api
def infer_imgs():
    """End-to-end DVPP inference: send all images, gather results, dump JSON."""
    api = prepare_infer_stream()
    uuid_img_id_pairs = send_many_images(api)
    detections = get_all_images_result(uuid_img_id_pairs, api)
    with open(DET_RESULT_JSON, "w") as out_file:
        out_file.write(json.dumps(detections))
    api.DestroyAllStreams()
def main(unused_arg):
    """Entry point: prepare the output layout and run the selected perf test.

    Output layout under FLAGS.output_dir:
        boxed_imgs/          images with drawn boxes (when --draw_box)
        txts/                per-image raw detection text files
        om_perf_report.txt   timing report
        om_det_result.json   COCO-style detection results
    """
    global BOXED_IMG_DIR
    global TXT_DIR
    global PERF_REPORT_TXT
    global DET_RESULT_JSON
    BOXED_IMG_DIR = os.path.join(FLAGS.output_dir, "boxed_imgs")
    TXT_DIR = os.path.join(FLAGS.output_dir, "txts")
    PERF_REPORT_TXT = os.path.join(FLAGS.output_dir, "om_perf_report.txt")
    DET_RESULT_JSON = os.path.join(FLAGS.output_dir, "om_det_result.json")
    # exist_ok avoids the race between the exists() check and makedirs()
    # that the original's check-then-create pairs had.
    os.makedirs(FLAGS.output_dir, exist_ok=True)
    os.makedirs(TXT_DIR, exist_ok=True)
    if FLAGS.draw_box:
        os.makedirs(BOXED_IMG_DIR, exist_ok=True)
    now_time_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    head_info = f"{'-'*50}Perf Test On NPU starts @ {now_time_str}{'-'*50}\n"
    with open(PERF_REPORT_TXT, "a+") as fw:
        fw.write(head_info)
    if FLAGS.preprocess == "DVPP":
        # Typo fix: the original printed "pert" instead of "perf".
        print("Start DVPP infer perf testing...")
        infer_imgs()
    else:
        print("Start OpenCV infer perf testing...")
        infer_imgs_in_dir_with_open_cv()
    end_time_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    tail_info = f"{'-'*50}Perf Test On NPU ends @ {end_time_str}{'-'*50}\n"
    with open(PERF_REPORT_TXT, "a+") as fw:
        fw.write(tail_info)
if __name__ == "__main__":
app.run(main)

View File

@ -0,0 +1,24 @@
#!/bin/bash
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Compute the COCO mAP report from a previously generated detection json.
PY=/usr/bin/python3.7
# Make the current directory importable so generate_map_report.py resolves.
export PYTHONPATH=${PYTHONPATH}:.
# NOTE(review): the annotation/detection paths below are machine-specific;
# adjust them to the local dataset and inference-output locations.
${PY} generate_map_report.py \
--annotations_json=/home/dataset/coco2017/annotations/instances_val2017.json \
--det_result_json=/home/sam/codes/SSD_MobileNet_FPN_for_MindSpore/infer/sdk/perf/om_infer_output_on_coco_val2017/om_det_result.json \
--output_path_name=./map_output/map.txt \
--anno_type=bbox

View File

@ -0,0 +1,46 @@
#!/bin/bash
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Run the SDK performance test (om_infer_perf_test.py) with OpenCV preprocess.
# NOTE(review): CUR_PATH is computed but never used below — confirm intent.
CUR_PATH=$(cd "$(dirname "$0")" || { warn "Failed to check path/to/run.sh" ; exit ; } ; pwd)
# Simple log helper functions
info() { echo -e "\033[1;34m[INFO ][MxStream] $1\033[1;37m" ; }
warn() { echo >&2 -e "\033[1;31m[WARN ][MxStream] $1\033[1;37m" ; }
PY_PATH=/usr/bin/python3.7
# SDK installation root; adjust for the local environment.
export MX_SDK_HOME=/home/sam/mxManufacture
#export ASCEND_AICPU_PATH=/usr/local/Ascend/ascend-toolkit/20.2.rc1/arm64-linux
export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib:${MX_SDK_HOME}/opensource/lib:${LD_LIBRARY_PATH}
export PYTHONPATH=${MX_SDK_HOME}/python:${PYTHONPATH}
export GST_PLUGIN_PATH=${MX_SDK_HOME}/opensource/lib/gstreamer-1.0:${MX_SDK_HOME}/lib/plugins
export GST_PLUGIN_SCANNER=${MX_SDK_HOME}/opensource/libexec/gstreamer-1.0/gst-plugin-scanner
# to set PYTHONPATH, import the StreamManagerApi.py
export PYTHONPATH=${PYTHONPATH}:${MX_SDK_HOME}/python
# Dataset and pipeline paths are machine-specific; adjust before running.
${PY_PATH} om_infer_perf_test.py \
--img_dir=/home/sam/dataset/coco2017/val2017 \
--how_many_images_to_infer=-1 \
--pipeline_config=/home/sam/codes/SSD_for_MindSpore/infer/sdk/conf/ssd_mobilenet_fpn_ms_coco_opencv.pipeline \
--infer_stream_name=detection \
--output_dir=./om_infer_output_on_coco_val2017_opencv \
--infer_timeout_secs=5 \
--display_step=100 \
--draw_box=true \
--score_thresh_for_draw=0.5