!29700 [MD] Tidy up UT and device queue info messages

Merge pull request !29700 from cathwong/ckw_device_queue_probes
This commit is contained in:
i-robot 2022-02-09 12:47:27 +00:00 committed by Gitee
commit 5858a06689
No known key found for this signature in database
GPG Key ID: 173E9B9CA92EEF8F
4 changed files with 15 additions and 12 deletions

View File

@ -1,5 +1,5 @@
/** /**
* Copyright 2019-2021 Huawei Technologies Co., Ltd * Copyright 2019-2022 Huawei Technologies Co., Ltd
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -285,7 +285,7 @@ Status DeviceQueueOp::SendDataToAscend() {
send_finished_ = true; send_finished_ = true;
} }
tree_->SetFinished(); tree_->SetFinished();
MS_LOG(INFO) << "Device queue send " << send_batch << " batch."; MS_LOG(INFO) << "ExecutionTree finished. Device queue sent number of batches: " << send_batch;
return Status::OK(); return Status::OK();
} }
@ -505,7 +505,7 @@ Status DeviceQueueOp::PushDataToGPU() {
send_finished_ = true; send_finished_ = true;
} }
tree_->SetFinished(); tree_->SetFinished();
MS_LOG(INFO) << "Device queue send " << send_batch << " batch."; MS_LOG(INFO) << "ExecutionTree finished. Device queue pushed number of batches: " << send_batch;
GpuBufferMgr::GetInstance().Close(handle); GpuBufferMgr::GetInstance().Close(handle);
GpuBufferMgr::GetInstance().CloseConfirm(); GpuBufferMgr::GetInstance().CloseConfirm();
@ -581,7 +581,7 @@ Status DeviceQueueOp::WorkerEntry(int32_t worker_id) {
RETURN_IF_NOT_OK(receive_queues_[worker_id]->PopFront(&current_row)); RETURN_IF_NOT_OK(receive_queues_[worker_id]->PopFront(&current_row));
} }
MS_LOG(INFO) << "Device queue worker id " << worker_id << "proc " << batch_num << "batch."; MS_LOG(INFO) << "Device queue worker id " << worker_id << " processed number of batches: " << batch_num;
// Add empty data_item vector with eoe_flag=false as quit flag. // Add empty data_item vector with eoe_flag=false as quit flag.
GpuConnectorItem connector_item = {{}, false}; GpuConnectorItem connector_item = {{}, false};
RETURN_IF_NOT_OK(gpu_connector_->Add(worker_id, std::move(connector_item))); RETURN_IF_NOT_OK(gpu_connector_->Add(worker_id, std::move(connector_item)));
@ -641,7 +641,7 @@ Status DeviceQueueOp::SendDataToGPU() {
RETURN_IF_NOT_OK(receive_queues_[num_buf++ % num_workers_]->Add(std::move(quit_flag))); RETURN_IF_NOT_OK(receive_queues_[num_buf++ % num_workers_]->Add(std::move(quit_flag)));
} }
MS_LOG(INFO) << "Device queue receive " << num_buf - num_workers_ << " batch."; MS_LOG(INFO) << "Device queue received number of batches and EOEs: " << (num_buf - num_workers_);
return Status::OK(); return Status::OK();
} }

View File

@ -1,4 +1,4 @@
# Copyright 2019 Huawei Technologies Co., Ltd # Copyright 2019-2022 Huawei Technologies Co., Ltd
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. # you may not use this file except in compliance with the License.
@ -240,9 +240,11 @@ def test_batch_12():
def test_batch_13(): def test_batch_13():
""" """
Test batch: python_multiprocessing is True and does not work for per_batch_map is None Feature: Batch op
Description: Test python_multiprocessing is True with per_batch_map is None
Expectation: python_multiprocessing is True is ignored when per_batch_map is None
""" """
logger.info("test_batch_12") logger.info("test_batch_13")
# define parameters # define parameters
batch_size = True batch_size = True

View File

@ -1,4 +1,4 @@
# Copyright 2020 Huawei Technologies Co., Ltd # Copyright 2020-2022 Huawei Technologies Co., Ltd
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. # you may not use this file except in compliance with the License.
@ -268,6 +268,7 @@ if __name__ == "__main__":
test_textline_dataset_num_samples() test_textline_dataset_num_samples()
test_textline_dataset_distribution() test_textline_dataset_distribution()
test_textline_dataset_repeat() test_textline_dataset_repeat()
test_textline_dataset_output_tensor()
test_textline_dataset_get_datasetsize() test_textline_dataset_get_datasetsize()
test_textline_dataset_to_device() test_textline_dataset_to_device()
test_textline_dataset_exceptions() test_textline_dataset_exceptions()

View File

@ -1,4 +1,5 @@
# Copyright 2019 Huawei Technologies Co., Ltd # Copyright 2019-2022 Huawei Technologies Co., Ltd
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. # you may not use this file except in compliance with the License.
@ -232,14 +233,13 @@ def test_sync_exception_04():
def test_sync_exception_05(): def test_sync_exception_05():
""" """
Test sync: with wrong batch size in update Test sync: with wrong condition name in update
""" """
logger.info("test_sync_exception_05") logger.info("test_sync_exception_05")
dataset = ds.GeneratorDataset(gen, column_names=["input"]) dataset = ds.GeneratorDataset(gen, column_names=["input"])
count = 0 count = 0
aug = Augment(0) aug = Augment(0)
# try to create dataset with batch_size < 0
dataset = dataset.sync_wait(condition_name="every batch", callback=aug.update) dataset = dataset.sync_wait(condition_name="every batch", callback=aug.update)
dataset = dataset.map(operations=[aug.preprocess], input_columns=["input"]) dataset = dataset.map(operations=[aug.preprocess], input_columns=["input"])
with pytest.raises(RuntimeError) as e: with pytest.raises(RuntimeError) as e: