!42242 Fix the problem of alarm clearing
Merge pull request !42242 from 刘勇琪/master
This commit is contained in: commit 8ee36ea455
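Read together, most of the hunks below make one recurring change: single-statement if, else, and for bodies are wrapped in braces, which is what clears the code-check alarms this pull request refers to. A minimal before/after sketch of that pattern, using made-up names rather than anything from the diff:

#include <cstdint>

// Before (flagged):  if (count < limit) total += count; else total = limit;
// After: every control-flow body gets its own braced block.
int32_t Accumulate(int32_t count, int32_t limit, int32_t total) {
  if (count < limit) {
    total += count;
  } else {
    total = limit;
  }
  return total;
}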
@@ -124,10 +124,11 @@ DataType::DataType(const std::string &type_str) {
 }
 
 std::string DataType::ToString() const {
-  if (type_ < DataType::NUM_OF_TYPES)
+  if (type_ < DataType::NUM_OF_TYPES) {
     return kTypeInfo[type_].name_;
-  else
+  } else {
     return "unknown";
+  }
 }
 
 #ifdef ENABLE_PYTHON
@@ -300,7 +300,9 @@ Status BatchOp::MapColumns(std::pair<std::unique_ptr<TensorQTable>, CBatchInfo>
   TensorTable in_cols(in_col_names_.size(), TensorRow(num_rows, nullptr)), out_cols;
 
   std::unordered_map<std::string, size_t> in_col_name_id;  // name of columns that need to be fed to per-batch_map
-  for (size_t i = 0; i < in_col_names_.size(); i++) in_col_name_id.insert({in_col_names_[i], i});
+  for (size_t i = 0; i < in_col_names_.size(); i++) {
+    in_col_name_id.insert({in_col_names_[i], i});
+  }
 
   for (const auto &itr : child_map_) {
     auto col_itr = in_col_name_id.find(itr.first);
@@ -135,7 +135,9 @@ Status BuildVocabOp::CollectorThread() {
     RETURN_IF_NOT_OK(collector_queue_->PopFront(&wrkr_map));
     RETURN_UNEXPECTED_IF_NULL(wrkr_map);
     if (!wrkr_map->empty()) {
-      for (const auto &wd : *wrkr_map) word_cnt_[wd.first] += wd.second;
+      for (const auto &wd : *wrkr_map) {
+        word_cnt_[wd.first] += wd.second;
+      }
     } else {
       ++num_quited_worker;
     }
@@ -35,8 +35,8 @@ const int32_t kQMnistLabelFileMagicNumber = 3074;
 const size_t kQMnistImageRows = 28;
 const size_t kQMnistImageCols = 28;
 const size_t kQMnistLabelLength = 8;
-uint32_t kNum4 = 4;
-uint32_t kNum12 = 12;
+const uint32_t kNum4 = 4;
+const uint32_t kNum12 = 12;
 
 QMnistOp::QMnistOp(const std::string &folder_path, const std::string &usage, bool compat,
                    std::unique_ptr<DataSchema> data_schema, std::shared_ptr<SamplerRT> sampler, int32_t num_workers,
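The QMnist hunk above is slightly different: the two numeric constants gain a const qualifier rather than braces. A short sketch of the same idea with hypothetical names; const-qualified namespace-scope constants cannot be modified at runtime and no longer trip checks that flag mutable non-local globals:

#include <cstdint>

namespace {
// Hypothetical names, for illustration only; const keeps the values immutable
// and lets the compiler place them in read-only storage.
const uint32_t kHeaderFieldBytes = 4;
const uint32_t kHeaderOffsetBytes = 12;
}  // namespace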
@@ -73,7 +73,9 @@ Status AutoWorkerPass::RunOnTree(std::shared_ptr<DatasetNode> root_ir, bool *con
     int32_t cur_node_num_worker = std::max(std::min(num_workers, cur_node_max), min_num_workers_);
 
     // if the num_worker to set is same as original, skip setting and printing the logs
-    if (cur_node_num_worker == p.first->NumWorkers()) continue;
+    if (cur_node_num_worker == p.first->NumWorkers()) {
+      continue;
+    }
     // log the change via warning msg so user can see what the num_worker is being set for which op
     MS_LOG(WARNING) << "AutoNumWorker enabled, num_workers in " << p.first->Name() << " is auto-adjusted from "
                     << std::to_string(p.first->NumWorkers()) + " to " + std::to_string(cur_node_num_worker);
@@ -122,8 +122,12 @@ Status DvppDecodeResizeJpegOp::OutputShape(const std::vector<TensorShape> &input
   if (inputs.size() < 1) {
     RETURN_STATUS_UNEXPECTED("DvppDecodeResizeJpegOp::OutputShape inputs is null");
   }
-  if (inputs[0].Rank() == 1) outputs.emplace_back(out);
-  if (!outputs.empty()) return Status::OK();
+  if (inputs[0].Rank() == 1) {
+    outputs.emplace_back(out);
+  }
+  if (!outputs.empty()) {
+    return Status::OK();
+  }
   return Status(StatusCode::kMDUnexpectedError, "Input has a wrong shape");
 }
 
@@ -776,10 +776,11 @@ Status MaskWithTensor(const std::shared_ptr<Tensor> &sub_mat, std::shared_ptr<Te
 
 Status CopyTensorValue(const std::shared_ptr<Tensor> &source_tensor, std::shared_ptr<Tensor> *dest_tensor,
                        const std::vector<int64_t> &source_indx, const std::vector<int64_t> &dest_indx) {
-  if (source_tensor->type() != (*dest_tensor)->type())
+  if (source_tensor->type() != (*dest_tensor)->type()) {
     RETURN_STATUS_UNEXPECTED(
       "CutMixBatch: CopyTensorValue failed: "
       "source and destination tensor must have the same type.");
+  }
   if (source_tensor->type() == DataType::DE_UINT8) {
     uint8_t pixel_value = 0;
     RETURN_IF_NOT_OK(source_tensor->GetItemAt(&pixel_value, source_indx));
@@ -26,7 +26,7 @@ RandAugmentOp::RandAugmentOp(int32_t num_ops, int32_t magnitude, int32_t num_mag
       magnitude_(magnitude),
       num_magnitude_bins_(num_magnitude_bins),
       interpolation_(interpolation),
-      fill_value_(std::move(fill_value)) {
+      fill_value_(fill_value) {
   rnd_.seed(GetSeed());
 }
 
@@ -1441,8 +1441,7 @@ void ShardReader::ConsumerByRow(int consumer_id) {
       if (interrupt_) {
        return;
       }
-      delivery_map_[sample_id_pos] =
-        std::make_shared<std::vector<std::tuple<std::vector<uint8_t>, json>>>(std::move(batch));
+      delivery_map_[sample_id_pos] = std::make_shared<std::vector<std::tuple<std::vector<uint8_t>, json>>>(batch);
     }
     cv_iterator_.notify_one();
   }
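The last two hunks drop std::move calls instead of adding braces. When the argument is a const reference (as fill_value appears to be) or is still referenced afterwards, std::move cannot actually move from it, so static checkers flag the call as having no effect and a plain copy is the honest spelling. A minimal sketch, assuming a const-reference parameter; the type and names here are illustrative, not taken from the diff:

#include <cstdint>
#include <vector>

struct FillOp {
  // std::move(fill_value) here would still invoke the copy constructor,
  // because moving from a const lvalue falls back to copying.
  explicit FillOp(const std::vector<uint8_t> &fill_value) : fill_value_(fill_value) {}
  std::vector<uint8_t> fill_value_;
};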