forked from mindspore-Ecosystem/mindspore
!20119 Fix Codex, Codecheck, Shellcheck, Pclint issues for master
Merge pull request !20119 from lixiachen/July_codex_fix
commit 43d8687102

@@ -129,7 +129,7 @@ Status CacheAdminArgHandler::AssignArg(const std::string &option, std::vector<ui
     *arg_stream >> value_as_uint;
     if (arg_stream->fail()) {
       arg_stream->clear();
-      arg_stream->seekg(pos, arg_stream->beg);
+      (void)arg_stream->seekg(pos, std::ios::beg);
       break;
     } else {
       out_arg->push_back(value_as_uint);

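Note: the `(void)` cast marks the discard of `seekg`'s return value (a reference to the stream) as intentional, and `std::ios::beg` replaces the unconventional `arg_stream->beg` member access; both are typical Codecheck/Pclint findings. A minimal, self-contained sketch of the same rewind-on-parse-failure idiom (illustrative only, not code from this patch):

    #include <cstdint>
    #include <iostream>
    #include <sstream>
    #include <string>

    int main() {
      std::istringstream stream("oops 123");
      uint32_t value = 0;
      std::streampos pos = stream.tellg();  // remember where this token starts
      stream >> value;                      // fails: "oops" is not a number
      if (stream.fail()) {
        stream.clear();                          // drop the fail bit before seeking
        (void)stream.seekg(pos, std::ios::beg);  // deliberately ignore the returned stream reference
      }
      std::string token;
      stream >> token;                          // re-read the same position as text
      std::cout << value << ' ' << token << '\n';  // prints "0 oops"
      return 0;
    }
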
@@ -59,6 +59,7 @@ Status StorageManager::DoServiceStart() {
   writable_containers_pool_.reserve(pool_size_);
   if (root_.IsDirectory()) {
     // create multiple containers and store their index in a pool
+    CHECK_FAIL_RETURN_UNEXPECTED(pool_size_ > 0, "Expect positive pool_size_, but got:" + std::to_string(pool_size_));
     for (int i = 0; i < pool_size_; i++) {
       RETURN_IF_NOT_OK(AddOneContainer());
     }

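Note: the added guard rejects a non-positive `pool_size_` before the container loop runs, turning a silent no-op into an explicit error Status. `CHECK_FAIL_RETURN_UNEXPECTED` is MindSpore's own macro; the stand-in below only sketches the general check-and-return shape under that assumption and is not the real macro definition:

    #include <iostream>
    #include <string>

    // Hypothetical stand-in for a Status type and guard macro; the real
    // MindSpore versions carry more context (error codes, file and line).
    struct Status {
      bool ok;
      std::string msg;
      static Status OK() { return {true, ""}; }
      static Status Error(const std::string &m) { return {false, m}; }
    };

    #define CHECK_OR_RETURN_ERROR(cond, msg) \
      do {                                   \
        if (!(cond)) {                       \
          return Status::Error(msg);         \
        }                                    \
      } while (false)

    Status StartPool(int pool_size) {
      CHECK_OR_RETURN_ERROR(pool_size > 0, "Expect positive pool_size, but got:" + std::to_string(pool_size));
      for (int i = 0; i < pool_size; ++i) {
        // ... create one container per pool slot ...
      }
      return Status::OK();
    }

    int main() {
      Status s = StartPool(0);
      std::cout << (s.ok ? "started" : s.msg) << '\n';  // prints the error message
      return 0;
    }
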
@@ -172,10 +173,10 @@ Status StorageManager::DoServiceStop() noexcept {
   return rc1;
 }

-StorageManager::StorageManager(const Path &root) : root_(root), pool_size_(1), file_id_(0), index_() {}
+StorageManager::StorageManager(const Path &root) : root_(root), file_id_(0), index_(), pool_size_(1) {}

 StorageManager::StorageManager(const Path &root, int pool_size)
-    : root_(root), pool_size_(pool_size), file_id_(0), index_() {}
+    : root_(root), file_id_(0), index_(), pool_size_(pool_size) {}

 StorageManager::~StorageManager() { (void)StorageManager::DoServiceStop(); }

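Note: both constructors are reordered so the member initializer list follows the declaration order of the members; C++ initializes members in declaration order regardless of how the list is written, and the mismatch is what GCC's -Wreorder and Pclint report. A small, hypothetical illustration of the rule (not code from this patch):

    // Members are initialized in declaration order (id_ first, then size_),
    // no matter how the initializer list is written. Listing them in a
    // different order is misleading and triggers -Wreorder, so the lists
    // below match the declarations.
    class Pool {
     public:
      Pool() : id_(0), size_(1) {}
      explicit Pool(int size) : id_(0), size_(size) {}

     private:
      int id_;
      int size_;
    };

    int main() {
      Pool pool(4);
      (void)pool;  // silence unused-variable warnings in this standalone sketch
      return 0;
    }
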
@@ -248,7 +248,6 @@ Status RandomDataOp::WorkerEntry(int32_t worker_id) {

       // Send new_row out
       RETURN_IF_NOT_OK(out_connector_->Add(std::move(new_row), worker_id));
-
     } else {
       // Now, let's enter the epoch sync
       RETURN_IF_NOT_OK(EpochSync(worker_id, &quitting));

@@ -24,7 +24,7 @@
 namespace mindspore {
 namespace dataset {
 MindRecordSamplerRT::MindRecordSamplerRT(mindrecord::ShardReader *shard_reader, int64_t samples_per_tensor)
-    : SamplerRT(0, samples_per_tensor), next_id_(0), shard_reader_(shard_reader) {}
+    : SamplerRT(0, samples_per_tensor), shard_reader_(shard_reader), sample_ids_(nullptr), next_id_(0) {}

 Status MindRecordSamplerRT::GetNextSample(TensorRow *out) {
   if (next_id_ > num_samples_) {

@@ -50,7 +50,6 @@ Status MindRecordSamplerRT::GetNextSample(TensorRow *out) {

 Status MindRecordSamplerRT::InitSampler() {
   sample_ids_ = shard_reader_->GetSampleIds();
-
   if (!sample_ids_) {
     // Note, sample_ids_.empty() is okay and will just give no sample ids.
     RETURN_STATUS_UNEXPECTED(

@@ -26,7 +26,6 @@

 namespace mindspore {
 namespace dataset {
-
 #ifndef ENABLE_ANDROID
 // This function not only creates a runtime sampler object, but also creates a ShardReader,
 // which will also be needed to build a runtime MindRecordOp

@@ -51,6 +50,5 @@ Status MindRecordSamplerObj::GetShardReader(std::unique_ptr<mindrecord::ShardRea
   return Status::OK();
 }
 #endif
-
 }  // namespace dataset
 }  // namespace mindspore

@@ -278,8 +278,7 @@ Status Path::CloseFile(int fd) const {
 }

 Status Path::TruncateFile(int fd) const {
-  int rc;
-  rc = ftruncate(fd, 0);
+  int rc = ftruncate(fd, 0);
   if (rc == 0) {
     return Status::OK();
   } else {

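Note: this hunk, and the two mindrecord hunks that follow, fold a separate declaration and assignment into a single initialized declaration, so the variable never exists in an uninitialized (or needlessly default-constructed) state; this is the pattern Codecheck flags. A runnable POSIX sketch of the same `ftruncate` call (illustrative only, not from this patch):

    #include <cstdio>    // std::tmpfile, std::fclose, perror
    #include <unistd.h>  // ftruncate, fileno (POSIX)

    int main() {
      std::FILE *tmp = std::tmpfile();  // scratch file, removed automatically on close
      if (tmp == nullptr) {
        perror("tmpfile");
        return 1;
      }
      // Declare and initialize in one statement instead of
      //   int rc;
      //   rc = ftruncate(...);
      int rc = ftruncate(fileno(tmp), 0);
      if (rc != 0) {
        perror("ftruncate");
      }
      std::fclose(tmp);
      return rc == 0 ? 0 : 1;
    }
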
@@ -1320,8 +1320,7 @@ TASK_RETURN_CONTENT ShardReader::ConsumerOneTask(int task_id, uint32_t consumer_
   uint32_t blob_end = 0;
   json var_fields;
   // Pick up task from task list
-  ShardTask task;
-  task = tasks_.GetTaskByID(task_id);
+  ShardTask task = tasks_.GetTaskByID(task_id);

   // check task type
   auto task_type = std::get<0>(task);

@@ -43,8 +43,7 @@ int64_t ShardShuffle::GetNumSamples(int64_t dataset_size, int64_t num_classes) {
 }

 MSRStatus ShardShuffle::CategoryShuffle(ShardTaskList &tasks) {
-  uint32_t individual_size;
-  individual_size = tasks.sample_ids_.size() / tasks.categories;
+  uint32_t individual_size = tasks.sample_ids_.size() / tasks.categories;
   std::vector<std::vector<int>> new_permutations(tasks.categories, std::vector<int>(individual_size));
   for (uint32_t i = 0; i < tasks.categories; i++) {
     for (uint32_t j = 0; j < individual_size; j++) new_permutations[i][j] = static_cast<int>(j);

@@ -79,7 +79,8 @@ uint32_t ShardTaskList::SizeOfRows() const {
   if (task_list_.size() == 0) return static_cast<uint32_t>(0);

   // 1 task is 1 page
-  auto sum_num_rows = [](int x, ShardTask y) { return x + std::get<2>(y)[0]; };
+  const size_t kBlobInfoIndex = 2;
+  auto sum_num_rows = [](int x, ShardTask y) { return x + std::get<kBlobInfoIndex>(y)[0]; };
   uint32_t nRows = std::accumulate(task_list_.begin(), task_list_.end(), 0, sum_num_rows);
   return nRows;
 }

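Note: naming the tuple index (`kBlobInfoIndex`) documents which field `std::get` pulls out of a `ShardTask` instead of leaving the magic number 2; the constant still has to be usable at compile time. A self-contained sketch of the idea, with an invented task layout (the real ShardTask fields may differ):

    #include <cstdint>
    #include <iostream>
    #include <numeric>
    #include <string>
    #include <tuple>
    #include <vector>

    // Invented task record for illustration: <type, name, per-page row counts>.
    using Task = std::tuple<int, std::string, std::vector<uint64_t>>;

    int main() {
      // Name the field index instead of writing std::get<2> with a magic number.
      constexpr std::size_t kRowInfoIndex = 2;

      std::vector<Task> tasks;
      tasks.push_back(std::make_tuple(0, std::string("page-a"), std::vector<uint64_t>{3}));
      tasks.push_back(std::make_tuple(0, std::string("page-b"), std::vector<uint64_t>{5}));

      auto sum_rows = [](uint64_t acc, const Task &t) { return acc + std::get<kRowInfoIndex>(t)[0]; };
      uint64_t total = std::accumulate(tasks.begin(), tasks.end(), uint64_t{0}, sum_rows);
      std::cout << "total rows: " << total << '\n';  // prints 8
      return 0;
    }
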
@@ -229,6 +229,13 @@ def check_uint32(value, arg_name=""):


+def check_pos_uint32(value, arg_name=""):
+    """
+    Validates the value of a variable is within the range of positive uint32.
+
+    :param value: the value of the variable
+    :param arg_name: name of the variable to be validated
+    :return: Exception: when the validation fails, nothing otherwise.
+    """
+    type_check(value, (int,), arg_name)
+    check_value(value, [POS_INT_MIN, UINT32_MAX])

@@ -21,6 +21,7 @@ from .validators import check_one_hot_op, check_compose_list, check_random_apply
 from . import py_transforms_util as util
+from .c_transforms import TensorOperation


 def not_random(function):
     """
     Specify the function as "not random", i.e., it produces deterministic result.

@@ -148,7 +148,7 @@ class FuncWrapper:
         try:
             if hasattr(self.transform, "random") and not self.transform.random:
                 self.random = False
-        except Exception:
+        except KeyError:
             self.random = True

     def __call__(self, *args):

@@ -15,6 +15,7 @@
 # ============================================================================

 CURPATH="$(dirname "$0")"
+# shellcheck source=/dev/null
 . ${CURPATH}/cache_util.sh

 run_ascend()

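Note: the `# shellcheck source=/dev/null` directive added here and in the four scripts below tells ShellCheck not to try to follow the file sourced on the next line (its path depends on `$CURPATH` at runtime), which silences the SC1090/SC1091 "can't follow non-constant source" warnings without changing what the scripts do.
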
@@ -15,6 +15,7 @@
 # ============================================================================

 CURPATH="$(dirname "$0")"
+# shellcheck source=/dev/null
 . ${CURPATH}/cache_util.sh

 if [ $# != 3 ] && [ $# != 4 ] && [ $# != 5 ]

@@ -15,6 +15,7 @@
 # ============================================================================

 CURPATH="$(dirname "$0")"
+# shellcheck source=/dev/null
 . ${CURPATH}/cache_util.sh

 if [ $# != 2 ] && [ $# != 3 ] && [ $# != 4 ]

@@ -15,6 +15,7 @@
 # ============================================================================

 CURPATH="$(dirname "$0")"
+# shellcheck source=/dev/null
 . ${CURPATH}/cache_util.sh

 if [ $# != 2 ] && [ $# != 3 ] && [ $# != 4 ]

@@ -15,6 +15,7 @@
 # ============================================================================

 CURPATH="$(dirname "$0")"
+# shellcheck source=/dev/null
 . ${CURPATH}/cache_util.sh

 if [ $# != 2 ] && [ $# != 3 ] && [ $# != 4 ]
