clean code

ckey_Dou 2022-08-04 17:23:52 +08:00
parent 0aeef0019d
commit 775e886110
7 changed files with 28 additions and 26 deletions

View File

@@ -413,7 +413,7 @@ bool GeGraphExecutor::RunGraph(const FuncGraphPtr &graph, const std::vector<tens
     // memcpy_s does not support data larger than 2GB
     (void)memcpy(reinterpret_cast<uint8_t *>(output_addr->GetMutablePtr()), tensor->GetData(), tensor->GetSize());
     auto actual_shapes = tensor->GetTensorDesc().GetShape().GetDims();
-    output_shapes.emplace_back(std::move(actual_shapes));
+    (void)output_shapes.emplace_back(std::move(actual_shapes));
   }
   UpdateOutputNodeShape(graph_outputs, me_types, output_shapes);
   MS_LOG(INFO) << "GE run graph end.";
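
The only change in this hunk is the added (void) cast. Since C++17, std::vector::emplace_back returns a reference to the inserted element, and silently ignored return values are exactly what "clean code" lint checks flag; the cast documents that the result is discarded on purpose. A minimal standalone sketch of the pattern (illustrative, not MindSpore code):

#include <cstdint>
#include <utility>
#include <vector>

int main() {
  std::vector<std::vector<int64_t>> output_shapes;
  std::vector<int64_t> actual_shapes = {2, 3, 4};
  // emplace_back returns a reference to the new element (since C++17);
  // casting it to void tells readers and linters the value is unused on purpose.
  (void)output_shapes.emplace_back(std::move(actual_shapes));
  return 0;
}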

View File

@@ -424,12 +424,12 @@ void BroadcastIterator::GenNextPos() {
 void BroadcastIterator::BroadcastShape() {
   int input_dimension_a = input_shape_a_.size();
   if (input_dimension_a < output_dimension_) {
-    (void)input_shape_a_.insert(input_shape_a_.begin(), IntToLong(output_dimension_ - input_dimension_a), 1);
+    (void)input_shape_a_.insert(input_shape_a_.begin(), IntToSize(output_dimension_ - input_dimension_a), 1);
   }
   int input_dimension_b = input_shape_b_.size();
   if (input_dimension_b < output_dimension_) {
-    (void)input_shape_b_.insert(input_shape_b_.begin(), IntToLong(output_dimension_ - input_dimension_b), 1);
+    (void)input_shape_b_.insert(input_shape_b_.begin(), IntToSize(output_dimension_ - input_dimension_b), 1);
   }
 }
@@ -499,7 +499,7 @@ void MultipleBroadcastIterator::BroadcastShape() {
   for (auto &multi_input : multi_inputs_) {
     int input_dimension = SizeToInt(multi_input.size());
     if (input_dimension < output_dimension_) {
-      (void)multi_input.insert(multi_input.begin(), IntToLong(output_dimension_ - input_dimension), 1);
+      (void)multi_input.insert(multi_input.begin(), IntToSize(output_dimension_ - input_dimension), 1);
    }
   }
 }
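
Both hunks in this file replace IntToLong with IntToSize, so the count argument passed to std::vector::insert already has the vector's size_type instead of going through a second implicit int64_t-to-size_t conversion. The real helpers are MindSpore conversion utilities; the sketch below is a hypothetical reimplementation that only shows the usual shape of such checked casts (the actual helpers may report failure differently):

#include <cstddef>
#include <cstdint>
#include <stdexcept>
#include <string>

// Hypothetical stand-ins for the checked-cast helpers named in the diff.
inline size_t IntToSize(int n) {
  if (n < 0) {
    throw std::out_of_range("IntToSize: negative value " + std::to_string(n));
  }
  return static_cast<size_t>(n);
}

inline size_t LongToSize(int64_t n) {
  if (n < 0) {
    throw std::out_of_range("LongToSize: negative value " + std::to_string(n));
  }
  return static_cast<size_t>(n);
}

At both call sites the enclosing if already guarantees output_dimension_ is the larger value, so the difference is non-negative and the check inside the cast is pure documentation.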

View File

@@ -160,7 +160,8 @@ bool GatherDGradCpuKernelMod::LaunchKernel(const std::vector<kernel::AddressPtr>
   // grad_cargo_size
   std::vector<size_t> grad_cargo_size = std::vector<size_t>(grad_shape_.size(), 1);
   for (int i = static_cast<int>(grad_cargo_size.size()) - 2; i >= 0; --i) {
-    grad_cargo_size[i] = grad_shape_[i + 1] * grad_cargo_size[i + 1];
+    auto idx = IntToSize(i);
+    grad_cargo_size[idx] = grad_shape_[idx + 1] * grad_cargo_size[idx + 1];
   }
   // copy task
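
The loop index stays a signed int and is converted only at the use sites, because a size_t countdown would never terminate: once i reaches 0, --i wraps around to SIZE_MAX and i >= 0 is vacuously true. A standalone sketch of the pattern (illustrative, not the kernel code):

#include <cstddef>
#include <vector>

// Builds the "cargo sizes" (suffix products) of a shape. The countdown must
// use a signed index: with size_t, i >= 0 always holds and --i wraps to
// SIZE_MAX when i hits 0.
std::vector<size_t> CargoSizes(const std::vector<size_t> &shape) {
  std::vector<size_t> cargo(shape.size(), 1);
  for (int i = static_cast<int>(cargo.size()) - 2; i >= 0; --i) {
    auto idx = static_cast<size_t>(i);  // i is non-negative here, so this is safe
    cargo[idx] = shape[idx + 1] * cargo[idx + 1];
  }
  return cargo;
}

// For shape {2, 3, 4} this yields cargo == {12, 4, 1}.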

View File

@@ -141,7 +141,7 @@ std::vector<T> L2NormalizeGradCpuFunc<T>::GetVector(const std::vector<size_t> &h
   auto x_shape = input_shape_list_[0];
   std::vector<T> x_vector;
   auto idx = IntToSize(axis_);
-  x_vector.reserve(x_shape[idx]);
+  x_vector.reserve(LongToSize(x_shape[idx]));
   for (size_t i = 0; i < LongToSize(x_shape[idx]); i++) {
     size_t oneDimIndex = 0;
     std::vector<size_t> tmp_high_dim_index = high_dim_index;

View File

@@ -293,7 +293,7 @@ bool ReduceCpuKernelFunc<T>::RunFunc(const std::vector<kernel::AddressPtr> &inpu
       axes[k] = i;
       ++k;
     } else {
-      stride *= static_cast<size_t>(input_shape_[i]);
+      stride *= LongToSize(input_shape_[IntToSize(i)]);
       ++j;
     }
   }
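
Here the loop index i is an int while input_shape_ holds int64_t dimensions, so the new line spells out both conversions: IntToSize for the subscript and LongToSize for the factor that feeds the size_t accumulator. A standalone sketch of the same stride accumulation, with the checked helpers replaced by plain casts for brevity:

#include <cstddef>
#include <cstdint>
#include <vector>

// Multiplies together the dimensions in [first, last) of an int64_t shape.
// The signed index is cast for the subscript and each dimension is cast for
// the multiplication into the size_t accumulator; the diff uses the checked
// IntToSize/LongToSize helpers where this sketch uses raw static_casts.
size_t AccumulateStride(const std::vector<int64_t> &shape, int first, int last) {
  size_t stride = 1;
  for (int i = first; i < last; ++i) {
    stride *= static_cast<size_t>(shape[static_cast<size_t>(i)]);
  }
  return stride;
}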

View File

@@ -79,7 +79,7 @@ bool SpaceToDepthCpuKernelMod::LaunchKernel(const std::vector<kernel::AddressPtr
   auto task = [&, input_addr, output_addr](size_t start, size_t end) {
     std::vector<int64_t> input_pos_array(input_dimension, 0);
     for (size_t i = start; i < end; ++i) {
-      int64_t tmp_pos = i;
+      int64_t tmp_pos = SizeToLong(i);
       for (size_t j = 0; j < input_dimension - 1; ++j) {
         input_pos_array[j] = tmp_pos / input_strides[j];
         tmp_pos %= input_strides[j];
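
The parallel task receives size_t bounds, but the position arithmetic runs on int64_t strides, so the flat index is converted once up front (SizeToLong in the diff) rather than implicitly inside the loop. A self-contained sketch of the same unravel-index computation:

#include <cstdint>
#include <vector>

// Decomposes a flat element index into per-axis coordinates using row-major
// strides. The working variable is int64_t to match the strides' element
// type, so the size_t counter is converted explicitly.
std::vector<int64_t> Unravel(size_t flat, const std::vector<int64_t> &strides) {
  std::vector<int64_t> pos(strides.size(), 0);
  int64_t tmp = static_cast<int64_t>(flat);
  for (size_t j = 0; j < strides.size(); ++j) {
    pos[j] = tmp / strides[j];
    tmp %= strides[j];
  }
  return pos;
}

// Example: strides {12, 4, 1} (shape {2, 3, 4}) and flat = 17 give pos {1, 1, 1}.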

View File

@@ -684,21 +684,29 @@ class EinsumHelper {
     return true;
   }
+  inline bool SetValue(size_t cur_element, int64_t value) {
+    if (element_shape_map_.find(cur_element) != element_shape_map_.end()) {
+      if (element_shape_map_[cur_element][0] != value) {
+        MS_LOG(ERROR) << "For " << node_name_
+                      << ", the same label in equation can only represent the same dimension in inputs, but the "
+                      << static_cast<char>(cur_element + 'a') << " in equation not.";
+        return false;
+      }
+    } else {
+      element_shape_map_[cur_element] = {value};
+    }
+    return true;
+  }
   bool ElementMapShape(const std::vector<std::vector<int64_t>> &input_shapes) {
     for (size_t idx_input = 0; idx_input < input_shapes.size(); ++idx_input) {
       auto cur_shape = input_shapes[idx_input];
       size_t idx_left = 0;
       while (idx_left < left_elements_[idx_input].size() && left_elements_[idx_input][idx_left] != ELL_VAL) {
         auto cur_element = left_elements_[idx_input][idx_left];
-        if (element_shape_map_.find(cur_element) != element_shape_map_.end()) {
-          if (element_shape_map_[cur_element][0] != input_shapes[idx_input][idx_left]) {
-            MS_LOG(ERROR) << "For " << node_name_
-                          << ", the same label in equation can only represent the same dimension in inputs, but the "
-                          << static_cast<char>(cur_element + 'a') << " in equation not.";
-            return false;
-          }
-        } else {
-          element_shape_map_[cur_element] = {input_shapes[idx_input][idx_left]};
-        }
+        if (!SetValue(cur_element, input_shapes[idx_input][idx_left])) {
+          return false;
+        }
         ++idx_left;
       }
@@ -708,15 +716,8 @@ class EinsumHelper {
       auto idx_shape_right = input_shapes[idx_input].size() - 1;
       while (idx_element_right > idx_left && left_elements_[idx_input][idx_element_right] != ELL_VAL) {
         auto cur_element = left_elements_[idx_input][idx_element_right];
-        if (element_shape_map_.find(cur_element) != element_shape_map_.end()) {
-          if (element_shape_map_[cur_element][0] != input_shapes[idx_input][idx_shape_right]) {
-            MS_LOG(ERROR) << "For " << node_name_
-                          << ", the same label in equation can only represent the same dimension in inputs, but the "
-                          << static_cast<char>(cur_element + 'a') << " in equation not.";
-            return false;
-          }
-        } else {
-          element_shape_map_[cur_element] = {input_shapes[idx_input][idx_shape_right]};
-        }
+        if (!SetValue(cur_element, input_shapes[idx_input][idx_shape_right])) {
+          return false;
+        }
         --idx_shape_right;
         --idx_element_right;
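
The last two hunks are one refactor: the left-to-right and right-to-left scans over the einsum equation carried identical copies of the "same label, same dimension" check, and the commit hoists that check into the new SetValue helper so the rule lives in a single place. A minimal standalone analogue of the extracted helper (illustrative, independent of EinsumHelper's internals):

#include <cstdint>
#include <iostream>
#include <unordered_map>

// Records the dimension a subscript label stands for, or fails if the label
// was already bound to a different dimension elsewhere in the equation.
class LabelDims {
 public:
  bool SetValue(size_t label, int64_t dim) {
    auto it = dims_.find(label);
    if (it != dims_.end()) {
      if (it->second != dim) {
        std::cerr << "label " << static_cast<char>(label + 'a')
                  << " bound to both " << it->second << " and " << dim << "\n";
        return false;
      }
      return true;
    }
    dims_[label] = dim;
    return true;
  }

 private:
  std::unordered_map<size_t, int64_t> dims_;
};

// For "ij,jk->ik": SetValue('j' - 'a', 3) for both operands succeeds, while a
// later SetValue('j' - 'a', 5) reports the mismatch and returns false.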