!23202 codedex and pclint warning fix

Merge pull request !23202 from zetongzhao/warn_fix
This commit is contained in:
i-robot 2021-09-10 20:01:22 +00:00 committed by Gitee
commit cdde54a146
11 changed files with 13 additions and 17 deletions

View File

@ -64,8 +64,7 @@ BatchOp::BatchOp(int32_t batch_size, bool drop, bool pad, int32_t op_queue_size,
batch_num_(0),
batch_cnt_(0) {
// Adjust connector queue size. After batch each row is batch_size times larger
int32_t queue_size;
queue_size = std::max(1, op_queue_size / start_batch_size_);
int32_t queue_size = std::max(1, op_queue_size / start_batch_size_);
if (num_workers == 1) {
// ensure there are at least 2 queue slots for the whole operation. If only 1 worker, increase it to 2
queue_size = std::max(2, queue_size);
@ -85,8 +84,7 @@ BatchOp::BatchOp(int32_t batch_size, bool drop, bool pad, int32_t op_queue_size,
pad_info_(pad_map),
batch_num_(0),
batch_cnt_(0) {
int32_t queue_size;
queue_size = std::max(1, op_queue_size / start_batch_size_);
int32_t queue_size = std::max(1, op_queue_size / start_batch_size_);
if (num_workers == 1) {
// ensure there are at least 2 queue slots for the whole operation. If only 1 worker, increase it to 2
queue_size = std::max(2, queue_size);

View File

@ -177,6 +177,7 @@ int CsvOp::CsvParser::PutRow(int c) {
Status s = rows_connector_->Add(worker_id_, std::move(cur_row_));
if (s.IsError()) {
err_message_ = s.ToString();
// if error type is interrupted, return error code -2
if (s.StatusCode() == kMDInterrupted) return -2;
return -1;
}
@ -475,6 +476,7 @@ Status CsvOp::LoadFile(const std::string &file, int64_t start_offset, int64_t en
int chr = ifs.get();
int err = csv_parser.ProcessMessage(chr);
if (err != 0) {
// if error code is -2, the returned error is interrupted
if (err == -2) return Status(kMDInterrupted);
RETURN_STATUS_UNEXPECTED("Invalid file, failed to parse file: " + file + ": line " +
std::to_string(csv_parser.GetTotalRows() + 1) +

View File

@ -25,7 +25,7 @@
namespace mindspore {
namespace dataset {
// Construct ZipOp here, local variables initialized in operator due to tree construction restrictions
ZipOp::ZipOp(int32_t op_connector_size) : PipelineOp(0) {}
ZipOp::ZipOp() : PipelineOp(0) {}
// destructor
ZipOp::~ZipOp() {}

View File

@ -34,7 +34,7 @@ class ZipOp : public PipelineOp {
public:
// Constructor for ZipOp
// @param op_connector_size - connector size
explicit ZipOp(int32_t op_connector_size);
ZipOp();
// Destructor
~ZipOp();

View File

@ -125,7 +125,7 @@ Status ManifestNode::GetDatasetSize(const std::shared_ptr<DatasetSizeGetter> &si
RETURN_IF_NOT_OK(Build(&ops));
CHECK_FAIL_RETURN_UNEXPECTED(!ops.empty(), "Unable to build op.");
auto op = std::dynamic_pointer_cast<ManifestOp>(ops.front());
op->CountTotalRows(&num_rows);
RETURN_IF_NOT_OK(op->CountTotalRows(&num_rows));
std::shared_ptr<SamplerRT> sampler_rt = nullptr;
RETURN_IF_NOT_OK(sampler_->SamplerBuild(&sampler_rt));
sample_size = sampler_rt->CalculateNumSamples(num_rows);

View File

@ -59,7 +59,7 @@ Status ZipNode::ValidateParams() {
}
Status ZipNode::Build(std::vector<std::shared_ptr<DatasetOp>> *const node_ops) {
auto op = std::make_shared<ZipOp>(connector_que_size_);
auto op = std::make_shared<ZipOp>();
op->set_total_repeats(GetTotalRepeats());
op->set_num_repeats_per_epoch(GetNumRepeatsPerEpoch());
node_ops->push_back(op);

View File

@ -32,9 +32,6 @@ Status TreeAdapterLite::BuildExecutionTreeRecur(std::shared_ptr<DatasetNode> ir,
(*op) = ops.front(); // return the first op to be added as child by the caller of this function
if (op == NULL) {
return StatusCode::kLiteNullptr;
}
for (size_t i = 0; i < ops.size(); i++) {
RETURN_IF_NOT_OK(tree_->AssociateNode(ops[i]));
if (i > 0) {

View File

@ -616,11 +616,11 @@ Status CopyTensorValue(const std::shared_ptr<Tensor> &source_tensor, std::shared
"CutMixBatch: CopyTensorValue failed: "
"source and destination tensor must have the same type.");
if (source_tensor->type() == DataType::DE_UINT8) {
uint8_t pixel_value;
uint8_t pixel_value = 0;
RETURN_IF_NOT_OK(source_tensor->GetItemAt(&pixel_value, source_indx));
RETURN_IF_NOT_OK((*dest_tensor)->SetItemAt(dest_indx, pixel_value));
} else if (source_tensor->type() == DataType::DE_FLOAT32) {
float pixel_value;
float pixel_value = 0;
RETURN_IF_NOT_OK(source_tensor->GetItemAt(&pixel_value, source_indx));
RETURN_IF_NOT_OK((*dest_tensor)->SetItemAt(dest_indx, pixel_value));
} else {

View File

@ -89,6 +89,5 @@ Status PluginOp::Init() {
CHECK_FAIL_RETURN_UNEXPECTED(rc.IsOk(), rc.ToString());
return Status::OK();
}
} // namespace dataset
} // namespace mindspore

View File

@ -58,7 +58,7 @@ class _SharedQueue(multiprocessing.queues.Queue):
for _ in range(self.num_seg):
a = multiprocessing.Array("b", self.seg_size)
self.shm_list.append(a)
except:
except Exception:
raise RuntimeError(
"_SharedQueue: Error allocating "
+ str(self.seg_size)

View File

@ -75,7 +75,7 @@ TEST_F(MindDataTestZipOp, MindDataTestZipOpDefault) {
EXPECT_TRUE(rc.IsOk());
// Creating DatasetOp
std::shared_ptr<ZipOp> zip_op = std::make_shared<ZipOp>(op_connector_size);
std::shared_ptr<ZipOp> zip_op = std::make_shared<ZipOp>();
rc = my_tree->AssociateNode(zip_op);
EXPECT_TRUE(rc.IsOk());
@ -159,7 +159,7 @@ TEST_F(MindDataTestZipOp, MindDataTestZipOpRepeat) {
rc = my_tree->AssociateNode(my_tfreader_op2);
EXPECT_TRUE(rc.IsOk());
// Creating DatasetOp
std::shared_ptr<ZipOp> zip_op = std::make_shared<ZipOp>(op_connector_size);
std::shared_ptr<ZipOp> zip_op = std::make_shared<ZipOp>();
rc = my_tree->AssociateNode(zip_op);
EXPECT_TRUE(rc.IsOk());
my_tfreader_op->set_total_repeats(num_repeats);