diff --git a/model_zoo/official/cv/yolov3_darknet53/README.md b/model_zoo/official/cv/yolov3_darknet53/README.md
index 30e9752c910..3ea30ac6bbc 100644
--- a/model_zoo/official/cv/yolov3_darknet53/README.md
+++ b/model_zoo/official/cv/yolov3_darknet53/README.md
@@ -390,11 +390,12 @@ This the standard format from `pycocotools`, you can refer to [cocodataset](http
 Currently, batchsize can only set to 1.
 
 ```shell
-python export.py --ckpt_file [CKPT_PATH] --file_name [FILE_NAME] --file_format [FILE_FORMAT]
+python export.py --ckpt_file [CKPT_PATH] --file_name [FILE_NAME] --file_format [FILE_FORMAT] --keep_detect [Bool]
 ```
 
 The ckpt_file parameter is required, `EXPORT_FORMAT` should be in ["AIR", "MINDIR"]
+`keep_detect` keep the detect module or not, default: True
 
 ### [Inference Process](#contents)
 
diff --git a/model_zoo/official/cv/yolov3_darknet53/README_CN.md b/model_zoo/official/cv/yolov3_darknet53/README_CN.md
index 2c51eb9b8c6..556c3aaac3e 100644
--- a/model_zoo/official/cv/yolov3_darknet53/README_CN.md
+++ b/model_zoo/official/cv/yolov3_darknet53/README_CN.md
@@ -383,10 +383,11 @@ bash run_eval.sh dataset/coco2014/ checkpoint/0-319_102400.ckpt
 ## 导出mindir模型
 
 ```shell
-python export.py --ckpt_file [CKPT_PATH] --file_name [FILE_NAME] --file_format [FILE_FORMAT]
+python export.py --ckpt_file [CKPT_PATH] --file_name [FILE_NAME] --file_format [FILE_FORMAT] --keep_detect [Bool]
 ```
 
 参数`ckpt_file` 是必需的,`EXPORT_FORMAT` 必须在 ["AIR", "MINDIR"]中进行选择。
+参数`keep_detect` 是否保留坐标检测模块, 默认为True
 
 ## 推理过程
 
diff --git a/model_zoo/official/cv/yolov3_darknet53/default_config.yaml b/model_zoo/official/cv/yolov3_darknet53/default_config.yaml
index 12b30075ec6..e949d1d48db 100644
--- a/model_zoo/official/cv/yolov3_darknet53/default_config.yaml
+++ b/model_zoo/official/cv/yolov3_darknet53/default_config.yaml
@@ -73,7 +73,7 @@ batch_size: 1
 ckpt_file: ""
 file_name: "yolov3_darknet53"
 file_format: "AIR" # ["AIR", "ONNX", "MINDIR"]
-
+keep_detect: True
 
 # convert weight option
 input_file: "./darknet53.conv.74"
@@ -170,6 +170,7 @@ ckpt_file: "Checkpoint file path."
 file_name: "output file name."
 file_format: "file format choices in ['AIR', 'ONNX', 'MINDIR']"
 device_target: "device target. choices in ['Ascend', 'GPU'] for train. choices in ['Ascend', 'GPU', 'CPU'] for export."
+keep_detect: "keep the detect module or not, default: True"
 
 # convert weight option
 input_file: "input file path."
diff --git a/model_zoo/official/cv/yolov3_darknet53/src/yolo.py b/model_zoo/official/cv/yolov3_darknet53/src/yolo.py
index 6fa9543b42f..b5cee676427 100644
--- a/model_zoo/official/cv/yolov3_darknet53/src/yolo.py
+++ b/model_zoo/official/cv/yolov3_darknet53/src/yolo.py
@@ -364,6 +364,7 @@ class YOLOV3DarkNet53(nn.Cell):
     def __init__(self, is_training, config=default_config):
         super(YOLOV3DarkNet53, self).__init__()
         self.config = config
+        self.keep_detect = self.config.keep_detect
         self.tenser_to_array = P.TupleToArray()
 
         # YOLOv3 network
@@ -383,6 +384,8 @@ class YOLOV3DarkNet53(nn.Cell):
         input_shape = F.shape(x)[2:4]
         input_shape = F.cast(self.tenser_to_array(input_shape), ms.float32)
         big_object_output, medium_object_output, small_object_output = self.feature_map(x)
+        if not self.keep_detect:
+            return big_object_output, medium_object_output, small_object_output
         output_big = self.detect_1(big_object_output, input_shape)
         output_me = self.detect_2(medium_object_output, input_shape)
         output_small = self.detect_3(small_object_output, input_shape)
diff --git a/model_zoo/official/cv/yolov4/README.md b/model_zoo/official/cv/yolov4/README.md
index 5412ac32e36..bcfa41ab6e5 100644
--- a/model_zoo/official/cv/yolov4/README.md
+++ b/model_zoo/official/cv/yolov4/README.md
@@ -488,11 +488,12 @@ overall performance
 If you want to infer the network on Ascend 310, you should convert the model to MINDIR:
 
 ```python
-python export.py --ckpt_file [CKPT_PATH] --file_name [FILE_NAME] --file_format [FILE_FORMAT]
+python export.py --ckpt_file [CKPT_PATH] --file_name [FILE_NAME] --file_format [FILE_FORMAT] --keep_detect [Bool]
 ```
 
 The ckpt_file parameter is required, `EXPORT_FORMAT` should be in ["AIR", "ONNX", "MINDIR"]
+`keep_detect` keep the detect module or not, default: True
 
 ## [Inference Process](#contents)
 
diff --git a/model_zoo/official/cv/yolov4/default_config.yaml b/model_zoo/official/cv/yolov4/default_config.yaml
index 454fcf90d0a..aeaeab0f635 100644
--- a/model_zoo/official/cv/yolov4/default_config.yaml
+++ b/model_zoo/official/cv/yolov4/default_config.yaml
@@ -64,7 +64,7 @@ testing_shape: 608
 ckpt_file: ""
 file_name: "yolov4"
 file_format: "AIR"
-
+keep_detect: True
 
 # Other default config
 hue: 0.1
@@ -162,4 +162,5 @@ batch_size: "batch size for export"
 testing_shape: "shape for test"
 ckpt_file: "Checkpoint file path for export"
 file_name: "output file name for export"
-file_format: "file format for export"
\ No newline at end of file
+file_format: "file format for export"
+keep_detect: "keep the detect module or not, default: True"
\ No newline at end of file
diff --git a/model_zoo/official/cv/yolov4/src/yolo.py b/model_zoo/official/cv/yolov4/src/yolo.py
index 9701226816c..626abf4fd20 100644
--- a/model_zoo/official/cv/yolov4/src/yolo.py
+++ b/model_zoo/official/cv/yolov4/src/yolo.py
@@ -432,6 +432,7 @@ class YOLOV4CspDarkNet53(nn.Cell):
     def __init__(self):
         super(YOLOV4CspDarkNet53, self).__init__()
         self.config = default_config
+        self.keep_detect = self.config.keep_detect
         self.test_img_shape = Tensor(tuple(self.config.test_img_shape), ms.float32)
 
         # YOLOv4 network
@@ -448,6 +449,8 @@ class YOLOV4CspDarkNet53(nn.Cell):
         if input_shape is None:
             input_shape = self.test_img_shape
         big_object_output, medium_object_output, small_object_output = self.feature_map(x)
+        if not self.keep_detect:
+            return big_object_output, medium_object_output, small_object_output
         output_big = self.detect_1(big_object_output, input_shape)
         output_me = self.detect_2(medium_object_output, input_shape)
         output_small = self.detect_3(small_object_output, input_shape)