forked from mindspore-Ecosystem/mindspore
fix fasterrcnn performance issue
parent ffaf33d5d6
commit 89669a5725
@@ -129,6 +129,10 @@ config = ed({
     "keep_checkpoint_max": 10,
     "save_checkpoint_path": "./",
 
+    # Number of threads used to process the dataset in parallel
+    "num_parallel_workers": 8,
+    # Parallelize Python operations with multiple worker processes
+    "python_multiprocessing": True,
     "mindrecord_dir": "../MindRecord_COCO_TRAIN",
     "coco_root": "./cocodataset/",
     "train_data_type": "train2017",
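For context, a minimal sketch of how the two new knobs are consumed, assuming easydict's EasyDict is the repo's ed and showing only the added keys (the forwarding call mirrors the train-script hunk further down):

    # Sketch only: the two config keys added above, read back as attributes.
    from easydict import EasyDict as ed

    config = ed({
        "num_parallel_workers": 8,        # dataset-pipeline worker count
        "python_multiprocessing": True,   # run Python map ops in worker processes
    })

    # Downstream code forwards them as keyword arguments:
    # create_fasterrcnn_dataset(..., num_parallel_workers=config.num_parallel_workers,
    #                           python_multiprocessing=config.python_multiprocessing)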
@@ -457,7 +457,7 @@ def data_to_mindrecord_byte_image(dataset="coco", is_training=True, prefix="fast
 
 
 def create_fasterrcnn_dataset(mindrecord_file, batch_size=2, device_num=1, rank_id=0, is_training=True,
-                              num_parallel_workers=8):
+                              num_parallel_workers=8, python_multiprocessing=False):
     """Create FasterRcnn dataset with MindDataset."""
     cv2.setNumThreads(0)
     de.config.set_prefetch_size(8)
@@ -471,7 +471,7 @@ def create_fasterrcnn_dataset(mindrecord_file, batch_size=2, device_num=1, rank_
         ds = ds.map(input_columns=["image", "annotation"],
                     output_columns=["image", "image_shape", "box", "label", "valid_num"],
                     column_order=["image", "image_shape", "box", "label", "valid_num"],
-                    operations=compose_map_func, python_multiprocessing=False,
+                    operations=compose_map_func, python_multiprocessing=python_multiprocessing,
                     num_parallel_workers=num_parallel_workers)
         ds = ds.batch(batch_size, drop_remainder=True)
     else:
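A self-contained sketch of the pattern this hunk enables, using MindSpore's mindspore.dataset API (build_pipeline and its parameters are hypothetical names for illustration): with python_multiprocessing=True, the Python map function runs in worker processes instead of threads, so heavy per-sample Python work such as the FasterRcnn preprocessing is not serialized by the GIL.

    import mindspore.dataset as de

    def build_pipeline(mindrecord_file, map_func, workers=8, use_mp=True):
        """Sketch: thread- vs process-based parallelism for a Python map op."""
        ds = de.MindDataset(mindrecord_file, columns_list=["image", "annotation"],
                            num_parallel_workers=workers, shuffle=True)
        # use_mp=True -> the Python callable runs in worker processes (no GIL
        # contention); use_mp=False -> it runs in threads within this process.
        ds = ds.map(operations=map_func, input_columns=["image", "annotation"],
                    num_parallel_workers=workers, python_multiprocessing=use_mp)
        return ds.batch(2, drop_remainder=True)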
@@ -113,7 +113,9 @@ if __name__ == '__main__':
 
     # When creating MindDataset, use the first mindrecord file, such as FasterRcnn.mindrecord0.
     dataset = create_fasterrcnn_dataset(mindrecord_file, batch_size=config.batch_size,
-                                        device_num=device_num, rank_id=rank)
+                                        device_num=device_num, rank_id=rank,
+                                        num_parallel_workers=config.num_parallel_workers,
+                                        python_multiprocessing=config.python_multiprocessing)
 
     dataset_size = dataset.get_dataset_size()
     print("Create dataset done!")
@@ -70,7 +70,7 @@ function preprocess_data()
 
 function compile_app()
 {
-    cd ../ascend310_infer/ || exit
+    cd ../ascend310_infer/src || exit
     if [ -f "Makefile" ]; then
        make clean
     fi
@@ -328,6 +328,6 @@ if __name__ == '__main__':
     log_path = os.path.join(arg.ckpt_path, 'logs')
     arg.logger = get_logger(log_path, arg.local_rank)
 
-    arg.logger.info('Config: {}'.format(pformat(arg)))
+    arg.logger.info('Config: %s', pformat(arg))
 
     main(arg)
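The logging changes here and below swap eager str.format calls for logging's deferred %-style arguments: the logger stores the arguments on the record and formats the message only if it passes the level filter, so suppressed or expensive messages cost almost nothing (this matches pylint's logging-format-interpolation advice). A standalone sketch of the difference; the logger name is hypothetical:

    import logging

    logger = logging.getLogger("fasterrcnn_demo")  # hypothetical name
    logger.setLevel(logging.WARNING)               # INFO records are filtered out

    big = list(range(10_000))

    # Eager: .format renders the whole list before the call, even though
    # the INFO record is ultimately discarded.
    logger.info('value: {}'.format(big))

    # Deferred: the argument is stashed on the LogRecord and only formatted
    # when a handler actually emits the record.
    logger.info('value: %s', big)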
@@ -90,13 +90,13 @@ def main():
     cfg.logger.info('start create dataloader')
     de_dataset, steps_per_epoch, class_num = get_de_dataset(cfg)
     cfg.steps_per_epoch = steps_per_epoch
-    cfg.logger.info('step per epoch: {}'.format(cfg.steps_per_epoch))
+    cfg.logger.info('step per epoch: %s', cfg.steps_per_epoch)
     de_dataloader = de_dataset.create_tuple_iterator()
-    cfg.logger.info('class num original: {}'.format(class_num))
+    cfg.logger.info('class num original: %s', class_num)
     if class_num % 16 != 0:
         class_num = (class_num // 16 + 1) * 16
     cfg.class_num = class_num
-    cfg.logger.info('change the class num to: {}'.format(cfg.class_num))
+    cfg.logger.info('change the class num to: %s', cfg.class_num)
     cfg.logger.info('end create dataloader')
 
     # backbone and loss
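The unchanged context lines above also round class_num up to the next multiple of 16, presumably for hardware-friendly layer widths; that rationale is an assumption, not stated in the diff. A quick worked check of the guard-plus-formula pattern:

    def round_up_to_16(class_num):
        """Round up to the next multiple of 16, leaving exact multiples alone."""
        if class_num % 16 != 0:
            class_num = (class_num // 16 + 1) * 16
        return class_num

    assert round_up_to_16(16) == 16      # already aligned, untouched
    assert round_up_to_16(1000) == 1008  # 1000 // 16 = 62 -> (62 + 1) * 16 = 1008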