!17074 clean pclint

From: @fangzehua
Reviewed-by: @wuxuejian, @oacjiewen
Signed-off-by: @wuxuejian
Commit 14e02438fb by mindspore-ci-bot, 2021-05-29 18:26:43 +08:00 (committed via Gitee)
8 changed files with 51 additions and 54 deletions

===== File 1 of 8 =====

@@ -25,7 +25,8 @@ namespace mindspore {
 namespace kernel {
 template <typename T>
 int Compress(HashmapEntry<T> *entry_p, const size_t &length, T entry) {
-  T i = (entry + 1) % length, off = 1;
+  T i = (entry + 1) % length;
+  int64_t off = 1;
   int compress_count = 0;
   for (; !entry_p[i].IsEmpty(); i = (i + 1) % length, off++) {
     if (entry_p[i].tag_ > off) {
@@ -62,9 +63,6 @@ void MapCacheIdxCPUKernel::InitKernel(const CNodePtr &kernel_node) {
     MS_LOG(EXCEPTION) << "Dimension of HashMap must be 2, (n, 4)";
   }
   hashmap_length_ = hashmap_shape[0];
-  if (hashmap_length_ <= 0) {
-    MS_LOG(EXCEPTION) << "Hashmap length must > 0";
-  }
   dtype_ = AnfAlgo::GetPrevNodeOutputInferDataType(kernel_node, 0);
 }
@@ -116,13 +114,13 @@ void MapCacheIdxCPUKernel::LaunchKernel(const std::vector<AddressPtr> &inputs,
     size_t count = 1;
     count_size += 1;
     while ((!hashmap[tmp_entry].IsEmpty() && !hashmap[tmp_entry].IsKey(key))) {
-      tmp_entry = (tmp_entry + 1) % hashmap_length_;
+      tmp_entry = (tmp_entry + 1) % static_cast<T>(hashmap_length_);
       if (count > hashmap_length_) {
         MS_LOG(EXCEPTION) << "Hashmap is full, search cache idx failed, please set a larger vocab_cache_size!";
       }
       count += 1;
     }
-    total_count += count;
+    total_count += SizeToFloat(count);
     if (hashmap[tmp_entry].IsEmpty()) {
       miss_idx.emplace_back(i);
       output_miss_emb_idx[miss_count] = key;
@@ -149,19 +147,19 @@ void MapCacheIdxCPUKernel::LaunchKernel(const std::vector<AddressPtr> &inputs,
     T entry = HashFunc(emb_idx, hashmap_length_);
     size_t tag_count = 1;
     while (!hashmap[entry].IsEmpty()) {
-      entry = (entry + 1) % hashmap_length_;
+      entry = (entry + 1) % static_cast<T>(hashmap_length_);
       if (tag_count > hashmap_length_) {
         MS_LOG(EXCEPTION) << "Hashmap is full, insert new key failed, please set a larger vocab_cache_size!";
       }
       tag_count++;
     }
     hashmap[entry].key_ = emb_idx;
-    hashmap[entry].step_ = step_[0];
-    hashmap[entry].tag_ = tag_count;
-    T tmp_entry = (entry + 1) % hashmap_length_;
+    hashmap[entry].step_ = SizeToLong(step_[0]);
+    hashmap[entry].tag_ = SizeToLong(tag_count);
+    T tmp_entry = (entry + 1) % static_cast<T>(hashmap_length_);
     size_t delete_count = 1;
     while (hashmap[tmp_entry].IsEmpty() || hashmap[tmp_entry].IsUsing(step_[0])) {
-      tmp_entry = (tmp_entry + 1) % hashmap_length_;
+      tmp_entry = (tmp_entry + 1) % static_cast<T>(hashmap_length_);
       if (delete_count > hashmap_length_) {
         MS_LOG(EXCEPTION) << "Hashmap is full, delete old key failed, please set a larger vocab_cache_size!";
       }
@@ -172,8 +170,8 @@ void MapCacheIdxCPUKernel::LaunchKernel(const std::vector<AddressPtr> &inputs,
     hashmap[entry].value_ = output_swap_cache_idx[i];
     hashmap[tmp_entry].SetEmpty();
     int compress_count = Compress(hashmap, hashmap_length_, tmp_entry);
-    total_delete_count += (compress_count + delete_count);
-    total_insert_count += tag_count;
+    total_delete_count += IntToFloat(compress_count + SizeToInt(delete_count));
+    total_insert_count += SizeToFloat(tag_count);
   }
   if (miss_count != 0) {
     MS_LOG(INFO) << "Insert count: " << total_insert_count / miss_count;

===== File 2 of 8 =====

@@ -63,9 +63,9 @@ void PadAndShiftCPUKernel::LaunchKernel(const std::vector<AddressPtr> &inputs,
   if (shift_idx >= static_cast<T>(cum_sum_size_)) {
     MS_LOG(EXCEPTION) << "Shift index must small than cumsum size.";
   }
-  size_t output_size = cum_sum_arr[cum_sum_size_ - 1];
-  T shift_size = cum_sum_arr[shift_idx];
-  T valid_size = cum_sum_arr[shift_idx + 1] - shift_size;
+  size_t output_size = static_cast<size_t>(cum_sum_arr[cum_sum_size_ - 1]);
+  size_t shift_size = static_cast<size_t>(cum_sum_arr[shift_idx]);
+  size_t valid_size = static_cast<size_t>(cum_sum_arr[shift_idx + 1] - shift_size);
   int ret = memset_s(output, outputs[0]->size, -1, type_size_ * output_size);
   if (ret != 0) {
     MS_LOG(EXCEPTION) << "memset_s error, errorno" << ret;

===== File 3 of 8 =====

@@ -66,7 +66,7 @@ void SubAndFilterCPUKernel::LaunchKernel(const std::vector<AddressPtr> &inputs,
     T temp = input_x[i] - offset;
     if (temp < 0 || temp >= max_num) continue;
     filter_res[count] = temp;
-    filter_idx[count] = i;
+    filter_idx[count] = static_cast<T>(i);
     count++;
   }
   MS_LOG(INFO) << "SubAndFilter output count is " << count;
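
The `SubAndFilter` fix is the mirror image: the loop counter `i` is `size_t`, but `filter_idx` stores the signed element type `T`, so the narrowing becomes an explicit `static_cast<T>(i)`. A compact standalone version, assuming `T = int32_t` and made-up inputs:

```cpp
// Standalone sketch of the filter loop: subtract an offset, keep values
// inside [0, max_num), and record where each kept value came from.
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  using T = int32_t;
  const std::vector<T> input_x = {5, 12, 3, 20};
  const T offset = 4;
  const T max_num = 10;

  std::vector<T> filter_res;
  std::vector<T> filter_idx;
  for (size_t i = 0; i < input_x.size(); ++i) {
    T temp = input_x[i] - offset;
    if (temp < 0 || temp >= max_num) continue;
    filter_res.push_back(temp);
    // i is size_t but the index output has the signed element type T,
    // so the narrowing is written out, as in the diff.
    filter_idx.push_back(static_cast<T>(i));
  }
  for (size_t k = 0; k < filter_res.size(); ++k) {
    std::printf("idx %d -> %d\n", filter_idx[k], filter_res[k]);
  }
  // prints: idx 0 -> 1 and idx 1 -> 8
}
```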

===== File 4 of 8 =====

@@ -81,9 +81,9 @@ void UpdateCacheCPUKernel::LaunchKernel(const std::vector<AddressPtr> &inputs,
     if (indices[i] < 0 || indices[i] >= max_num_) continue;
     char *tmp = update + i * one_length_size;
-    if (indices[i] * one_length_size + one_length_size <= max_size) {
-      int ret =
-        memcpy_s(input_x + indices[i] * one_length_size, max_size - indices[i] * one_length_size, tmp, one_length_size);
+    if (static_cast<size_t>(indices[i]) * one_length_size + one_length_size <= max_size) {
+      int ret = memcpy_s(input_x + static_cast<size_t>(indices[i]) * one_length_size,
+                         max_size - static_cast<size_t>(indices[i]) * one_length_size, tmp, one_length_size);
       if (ret != 0) {
         MS_LOG(EXCEPTION) << "memcpy_s error, errorno" << ret;
       }
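
`indices[i]` stays signed while it is range-checked, and only then is cast to `size_t` for the offset arithmetic, so a negative index can never wrap into a huge unsigned offset. A sketch of the same guarded row copy, with `std::memcpy` standing in for the securec `memcpy_s` used upstream:

```cpp
// Sketch of the guarded row copy; the checks have the same shape as the diff.
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

int main() {
  using T = int64_t;
  const size_t one_length_size = 4 * sizeof(float);  // bytes per row
  std::vector<float> table(8 * 4, 0.0f);             // 8 rows of 4 floats
  const std::vector<float> update = {1.0f, 2.0f, 3.0f, 4.0f};
  char *input_x = reinterpret_cast<char *>(table.data());
  const size_t max_size = table.size() * sizeof(float);
  const T max_num = 8;
  const T index = 5;  // row to overwrite

  // 1) reject bad indices while the value is still signed...
  if (index < 0 || index >= max_num) return 0;
  // 2) ...and only then go unsigned, so the offset cannot wrap
  const size_t offset = static_cast<size_t>(index) * one_length_size;
  if (offset + one_length_size <= max_size) {
    std::memcpy(input_x + offset, update.data(), one_length_size);
  }
  std::printf("row 5 starts with %.0f\n", table[5 * 4]);  // 1
}
```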

===== File 5 of 8 =====

@@ -39,7 +39,7 @@ class UpdateCacheCPUKernel : public CPUKernel {
  private:
   size_t batch_size_{1};
-  size_t update_size_{1};
+  int64_t update_size_{1};
   size_t step_{0};
   size_t update_length_{1};
   int64_t max_num_ = 99999999;
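
The header change follows from the .cc fixes: `update_size_` is accumulated from tensor shape dimensions, which MindSpore represents as `int64_t`, so keeping the member signed avoids a sign conversion on every multiply. Illustrative only:

```cpp
// Illustrative only: with shapes stored as std::vector<int64_t>, an int64_t
// accumulator needs no sign conversion, while a size_t one converts (and
// pclint reports) every dimension it multiplies in.
#include <cstdint>
#include <cstdio>
#include <vector>

int64_t UpdateSize(const std::vector<int64_t> &shape) {
  int64_t update_size = 1;
  for (int64_t d : shape) update_size *= d;  // signed end to end
  return update_size;
}

int main() { std::printf("%lld\n", (long long)UpdateSize({2, 3, 4})); }  // 24
```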

===== File 6 of 8 =====

@@ -149,8 +149,8 @@ void MemCopyFromHostToCache(void *hashmap_addr, void *host_addr, void *cache_add
   size_t single_col_bytes = param_type_size * col_size;
   for (size_t i = 0; i < hashmap_size; ++i) {
     if (!hashmap_data[i].IsEmpty()) {
-      size_t host_offset = single_col_bytes * hashmap_data[i].key_;
-      size_t cache_offset = single_col_bytes * hashmap_data[i].value_;
+      size_t host_offset = single_col_bytes * static_cast<size_t>(hashmap_data[i].key_);
+      size_t cache_offset = single_col_bytes * static_cast<size_t>(hashmap_data[i].value_);
       if (host_offset + single_col_bytes <= host_max) {
         auto ret =
           memcpy_s(cache_data + cache_offset, cache_max - cache_offset, host_data + host_offset, single_col_bytes);
@@ -186,8 +186,8 @@ void BindAndInitCacheTensor(const ParamMap &param_pair_list, const ParameterPtr
     MS_LOG(EXCEPTION) << "Got host shape and cache shape invalid."
                       << "host shape:" << host_shape << ", cache shape:" << cache_shape;
   }
-  auto host_data_max_size = host_tensor->Size();
-  auto cache_data_max_size = cache_tensor->Size();
+  auto host_data_max_size = static_cast<size_t>(host_tensor->Size());
+  auto cache_data_max_size = static_cast<size_t>(cache_tensor->Size());
   if (hashmap_data_type == TypeId::kNumberTypeInt32) {
     MemCopyFromHostToCache<int32_t>(hashmap_tensor->data_c(), host_tensor->data_c(), cache_tensor->data_c(),
                                     host_data_max_size, cache_data_max_size, hashmap_size, host_shape[1]);
@@ -212,12 +212,12 @@ void InitHashMapData(void *data, const int64_t host_size, const int64_t cache_si
     MS_LOG(EXCEPTION) << "Memset failed.";
   }
   std::vector<T> host_range;
-  host_range.reserve(host_size);
+  host_range.reserve(static_cast<T>(host_size));
   for (int64_t i = 0; i < host_size; ++i) {
-    host_range.emplace_back(i);
+    host_range.emplace_back(static_cast<T>(i));
   }
   std::random_shuffle(host_range.begin(), host_range.end());
-  size_t size = cache_size;
+  size_t size = static_cast<size_t>(cache_size);
   size_t hashmap_count = 0;
   for (size_t i = 0; i < size; ++i) {
     auto random_key = host_range[i];
@@ -225,14 +225,14 @@ void InitHashMapData(void *data, const int64_t host_size, const int64_t cache_si
     size_t count = 1;
     while (!hashmap_data[entry].IsEmpty() && !hashmap_data[entry].IsKey(random_key)) {
       count += 1;
-      entry = (entry + 1) % hashmap_size;
+      entry = (entry + 1) % static_cast<T>(hashmap_size);
     }
     if (hashmap_data[entry].IsEmpty()) {
       hashmap_count++;
       hashmap_data[entry].key_ = random_key;
-      hashmap_data[entry].value_ = i;
+      hashmap_data[entry].value_ = SizeToLong(i);
       hashmap_data[entry].step_ = kInitStep;
-      hashmap_data[entry].tag_ = count;
+      hashmap_data[entry].tag_ = SizeToLong(count);
     }
   }
   MS_LOG(INFO) << "Hashmap init success, with " << hashmap_count << " / " << hashmap_size;
@@ -241,7 +241,7 @@ void InitHashMapData(void *data, const int64_t host_size, const int64_t cache_si
 AnfNodePtr InitHashMap(const FuncGraphPtr &func_graph, const int64_t host_size, const int64_t cache_size,
                        TypeId type_id) {
   // init new tensor
-  size_t hashmap_size = cache_size * kEmptyRate;
+  size_t hashmap_size = static_cast<size_t>(cache_size * kEmptyRate);
   std::vector<int64_t> host_shape{static_cast<int64_t>(hashmap_size), 4};
   auto new_tensor = std::make_shared<tensor::Tensor>(type_id, host_shape);
   size_t byte_size = new_tensor->Size();
@@ -294,10 +294,10 @@ AnfNodePtr CreateMapCacheIdx(const FuncGraphPtr &func_graph, const AnfNodePtr &i
     offset_value = rank_id * host_size;
   }
   auto offset = NewValueNode(MakeValue(offset_value));
-  auto max_num_imm = std::make_shared<Int64Imm>(SizeToLong(host_size));
+  auto max_num_imm = std::make_shared<Int64Imm>(host_size);
   auto max_num_abstract_scalar = std::make_shared<abstract::AbstractScalar>(max_num_imm);
   max_num->set_abstract(max_num_abstract_scalar);
-  auto offset_imm = std::make_shared<Int64Imm>(SizeToLong(offset_value));
+  auto offset_imm = std::make_shared<Int64Imm>(offset_value);
   auto offset_abstract_scalar = std::make_shared<abstract::AbstractScalar>(offset_imm);
   offset->set_abstract(offset_abstract_scalar);
@@ -391,7 +391,7 @@ AnfNodePtr CreateUpdateCache(const FuncGraphPtr &graph, ParameterPtr params, Anf
   auto params_shape = params_shp->shape();
   auto max_size = params_shape[0];
   auto max_size_node = NewValueNode(MakeValue(max_size));
-  auto max_num_imm = std::make_shared<Int64Imm>(SizeToLong(max_size));
+  auto max_num_imm = std::make_shared<Int64Imm>(max_size);
   auto max_num_abstract_scalar = std::make_shared<abstract::AbstractScalar>(max_num_imm);
   max_size_node->set_abstract(max_num_abstract_scalar);
@@ -511,7 +511,7 @@ void RemoveOriginParamFromSet(const CNodePtr &unique_node, AnfSet *no_ref_params
     if (input->isa<CNode>()) {
       que.push(input->cast<CNodePtr>());
     } else if (input->isa<Parameter>()) {
-      int num = no_ref_params->erase(input);
+      size_t num = no_ref_params->erase(input);
       if (num > 0) {
         MS_LOG(INFO) << "Erase unique_node input from set success.";
         return;
@@ -590,7 +590,6 @@ void ReplaceNoRefToParams(const FuncGraphPtr &graph, const AnfMap &no_ref_pipe_p
   auto manager = graph->manager();
   MS_EXCEPTION_IF_NULL(manager);
   auto node_users = manager->node_users();
-  AnfNodePtrList control_depend_list;
   // add other no ref pipe param and unique index dense
   for (auto &ele : no_ref_pipe_param_map) {
     auto user_set = node_users[ele.first];
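
One more conversion in this file is worth a note: `std::set::erase(key)` returns the container's unsigned `size_type`, so binding the result to `int` (as the old code in `RemoveOriginParamFromSet` did) is an implicit narrowing, which is exactly what the hunk above fixes. A two-line demonstration:

```cpp
// std::set::erase(const key&) returns size_type, an unsigned element count,
// so size_t matches the API exactly where int forced a narrowing.
#include <cstdio>
#include <set>

int main() {
  std::set<int> s = {1, 2, 3};
  size_t num = s.erase(2);  // 1 if the key existed, 0 otherwise
  std::printf("%zu\n", num);
}
```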

===== File 7 of 8 =====

@@ -284,8 +284,8 @@ void MemCopyFromCacheToHost(void *hashmap_addr, void *host_addr, void *cache_add
   size_t single_col_bytes = param_type_size * col_size;
   for (size_t i = 0; i < hashmap_size; ++i) {
     if (!hashmap_data[i].IsEmpty()) {
-      size_t host_offset = single_col_bytes * hashmap_data[i].key_;
-      size_t cache_offset = single_col_bytes * hashmap_data[i].value_;
+      size_t host_offset = single_col_bytes * LongToSize(hashmap_data[i].key_);
+      size_t cache_offset = single_col_bytes * LongToSize(hashmap_data[i].value_);
       if (cache_offset + single_col_bytes <= cache_max) {
         auto ret =
           memcpy_s(host_data + host_offset, host_max - host_offset, cache_data + cache_offset, single_col_bytes);
@@ -319,8 +319,8 @@ void TensorPy::FlushFromCache(const Tensor &tensor) {
     MS_LOG(EXCEPTION) << "Got host shape and cache shape invalid."
                       << "host shape:" << host_shape << ", cache shape:" << cache_shape;
   }
-  auto host_data_max_size = tensor.Size();
-  auto cache_data_max_size = cache_tensor_ptr->Size();
+  auto host_data_max_size = static_cast<size_t>(tensor.Size());
+  auto cache_data_max_size = static_cast<size_t>(cache_tensor_ptr->Size());
   auto hashmap_data_type = hashmap_tensor_ptr->data_type();
   if (hashmap_data_type == TypeId::kNumberTypeInt32) {
     MemCopyFromCacheToHost<int32_t>(hashmap_tensor_ptr->data_c(), tensor.data_c(), cache_tensor_ptr->data_c(),
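
The `LongToSize` calls here do the same job as the `static_cast`s elsewhere, but through named helpers. A minimal sketch of plausible stand-ins (the real helpers live in MindSpore's conversion utilities and their exact checks are not shown in this diff):

```cpp
// Assumed behavior of the named conversion helpers: validate, then cast.
// They make every sign change visible to the reader and to pclint.
#include <cassert>
#include <cstdint>
#include <cstdio>

inline size_t LongToSize(int64_t v) {
  assert(v >= 0 && "a negative value cannot index a buffer");
  return static_cast<size_t>(v);
}
inline int64_t SizeToLong(size_t v) {
  assert(v <= static_cast<size_t>(INT64_MAX));
  return static_cast<int64_t>(v);
}

int main() {
  const size_t single_col_bytes = 16;
  const int64_t key = 7, value = 3;  // hashmap entries store signed fields
  size_t host_offset = single_col_bytes * LongToSize(key);     // 112
  size_t cache_offset = single_col_bytes * LongToSize(value);  // 48
  std::printf("%zu %zu\n", host_offset, cache_offset);
}
```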

===== File 8 of 8 =====

@@ -794,17 +794,17 @@ AbstractBasePtr InferImplReshape(const AnalysisEnginePtr &, const PrimitivePtr &
   auto max_shape = shape;
   auto min_shape = shape;
-  int x_num = 1;
-  int x_min_num = 1;
-  int x_max_num = 1;
-  for (int value : x_shape) {
-    x_num = IntMulWithOverflowCheck(value, x_num);
+  int64_t x_num = 1;
+  int64_t x_min_num = 1;
+  int64_t x_max_num = 1;
+  for (int64_t value : x_shape) {
+    x_num = LongMulWithOverflowCheck(value, x_num);
   }
-  for (int value : x_min_shape) {
-    x_min_num = IntMulWithOverflowCheck(value, x_min_num);
+  for (int64_t value : x_min_shape) {
+    x_min_num = LongMulWithOverflowCheck(value, x_min_num);
   }
-  for (int value : x_max_shape) {
-    x_max_num = IntMulWithOverflowCheck(value, x_max_num);
+  for (int64_t value : x_max_shape) {
+    x_max_num = LongMulWithOverflowCheck(value, x_max_num);
   }
   auto it_first = find(shape.begin(), shape.end(), -1);
@@ -814,11 +814,11 @@ AbstractBasePtr InferImplReshape(const AnalysisEnginePtr &, const PrimitivePtr &
     MS_LOG(EXCEPTION) << "At most one component of input shape can be -1";
   }
   auto index = std::distance(shape.begin(), it_first);
-  int infer_value = x_num;
-  int infer_min_value = x_min_num;
-  int infer_max_value = x_max_num;
+  int64_t infer_value = x_num;
+  int64_t infer_min_value = x_min_num;
+  int64_t infer_max_value = x_max_num;
   for (size_t i = 0; i < shape.size(); ++i) {
-    int value = shape[i];
+    int64_t value = shape[i];
     if (value != -1 && value != 0) {
       infer_value = infer_value / value;
       infer_min_value = infer_min_value / value;
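
Finally, the Reshape inference now counts elements in `int64_t` with an overflow-checked multiply. A sketch of an equivalent helper built on the GCC/Clang builtin, plus the divide-out step that infers the `-1` dimension (the real `LongMulWithOverflowCheck` may be implemented differently):

```cpp
// Overflow-checked int64_t multiply and -1 dimension inference, standalone.
#include <cstdint>
#include <cstdio>
#include <stdexcept>
#include <vector>

int64_t CheckedMul(int64_t a, int64_t b) {
  int64_t out;
  if (__builtin_mul_overflow(a, b, &out)) {
    throw std::overflow_error("shape element count overflows int64_t");
  }
  return out;
}

int main() {
  // Element count of the input, then divide out the known target dims
  // to infer the -1 placeholder.
  const std::vector<int64_t> x_shape = {32, 1024, 1024};  // 33554432 elements
  const std::vector<int64_t> shape = {32, -1};            // target shape
  int64_t x_num = 1;
  for (int64_t value : x_shape) x_num = CheckedMul(value, x_num);
  int64_t infer_value = x_num;
  for (int64_t value : shape) {
    if (value != -1 && value != 0) infer_value /= value;
  }
  std::printf("inferred dim: %lld\n", (long long)infer_value);  // 1048576
}
```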