!66856 [CleanCode] 告警清理 (static-analysis warning cleanup)

Merge pull request !66856 from jiaxueyu/bugfix
This commit is contained in:
i-robot 2024-03-21 11:52:45 +00:00 committed by Gitee
commit 91c6b7a30d
No known key found for this signature in database
GPG Key ID: 173E9B9CA92EEF8F
9 changed files with 31 additions and 34 deletions

View File

@@ -14,7 +14,6 @@
* limitations under the License.
*/
#include "plugin/device/ascend/kernel/opapi/aclnn/sigmoid_grad_aclnn_kernel.h"
#include <algorithm>
#include <vector>
#include <map>
#include <memory>

View File

@@ -81,7 +81,8 @@ void DynamicStitchCpuKernelMod::UpdateOutputShapeAndSize(const std::vector<Kerne
outputs[kIndex0]->SetShapeVector(result_shape_);
auto data_dtype = inputs[kIndex1]->dtype_id();
auto data_dtype_size = GetTypeByte(TypeIdToType(data_dtype));
size_t batch_size = std::accumulate(result_shape_.cbegin(), result_shape_.cend(), 1, std::multiplies<size_t>());
size_t batch_size =
LongToSize(std::accumulate(result_shape_.cbegin(), result_shape_.cend(), 1, std::multiplies<int64_t>()));
outputs[kIndex0]->set_size(batch_size * data_dtype_size);
}

View File

@@ -44,6 +44,7 @@ bool EnvironCreateCpuKernelMod::Launch(const std::vector<KernelTensor *> &, cons
int64_t env_handle = EnvironMgr::GetInstance().Create();
auto output = GetDeviceAddress<int64_t>(outputs, kIndex0);
MS_EXCEPTION_IF_NULL(output);
output[kIndex0] = env_handle;
MS_LOG(DEBUG) << "Create env handle: " << output[kIndex0];

View File

@@ -60,7 +60,9 @@ bool EnvironSetCpuKernelMod::Launch(const std::vector<KernelTensor *> &inputs, c
auto input_key = GetDeviceAddress<int64_t>(inputs, kIndex1);
auto input_value = GetDeviceAddress<void>(inputs, kIndex2);
auto output_handle = GetDeviceAddress<int64_t>(outputs, kIndex0);
MS_EXCEPTION_IF_NULL(input_handle);
MS_EXCEPTION_IF_NULL(input_key);
MS_EXCEPTION_IF_NULL(output_handle);
// Get host handle and host key.
int64_t host_handle = input_handle[kIndex0];
int64_t host_key = input_key[kIndex0];

View File

@@ -80,7 +80,7 @@ void PadAndShiftCpuKernelMod::LaunchKernel(const std::vector<KernelTensor *> &in
void PadAndShiftCpuKernelMod::UpdateOutputShapeAndSize(const std::vector<KernelTensor *> &inputs,
const std::vector<KernelTensor *> &outputs) {
ShapeVector output_shape(input_shape_.begin(), input_shape_.end());
output_shape[kIndex0] = output_size_;
output_shape[kIndex0] = static_cast<int64_t>(output_size_);
outputs[kIndex0]->SetShapeVector(output_shape);
outputs[kIndex0]->set_size(output_size_ * type_size_);
}

View File

@@ -257,6 +257,21 @@ bool PyFuncCpuKernelMod::Launch(const std::vector<KernelTensor *> &inputs, const
return ExecuteKernel(inputs, outputs);
}
// Collects the dtype ids for a kernel's inputs or outputs into *types.
//
// If `primitive` carries the attribute `arg_name` (e.g. "in_types" /
// "out_types"), the type ids are taken from that attribute's TypePtr list;
// otherwise they fall back to the dtype ids of the given kernel tensors.
//
// @param primitive  primitive whose attribute may record the expected types
// @param inputs     kernel tensors used as the fallback type source
// @param arg_name   name of the attribute holding the recorded types
// @param types      out-parameter; type ids are appended (must be non-null)
//
// NOTE(review): BuildFuncInfo passes `inputs` for both "in_types" and
// "out_types"; the pre-refactor fallback for "out_types" iterated the
// *outputs* vector instead — confirm the call site supplies the intended
// tensor list when the attribute is absent.
void GetTypeInfo(const PrimitivePtr &primitive, const std::vector<KernelTensor *> &inputs, const std::string &arg_name,
                 std::vector<TypeId> *types) {
  MS_EXCEPTION_IF_NULL(primitive);
  MS_EXCEPTION_IF_NULL(types);
  if (primitive->HasAttr(arg_name)) {
    // Attribute path: the graph already recorded the expected types.
    const auto &type_ptrs = GetValue<std::vector<TypePtr>>(primitive->GetAttr(arg_name));
    // Iterate by const-ref: TypePtr is a shared_ptr, so a by-value lambda
    // parameter would pay an atomic refcount bump per element.
    for (const auto &type_ptr : type_ptrs) {
      MS_EXCEPTION_IF_NULL(type_ptr);
      (void)types->emplace_back(type_ptr->type_id());
    }
  } else {
    // Fallback path: derive the types from the runtime kernel tensors.
    for (const auto *tensor : inputs) {
      MS_EXCEPTION_IF_NULL(tensor);
      (void)types->emplace_back(tensor->dtype_id());
    }
  }
}
void PyFuncCpuKernelMod::BuildFuncInfo(const PrimitivePtr &primitive, const std::vector<KernelTensor *> &inputs,
const std::vector<KernelTensor *> &outputs) {
std::vector<TypeId> in_types;
@@ -264,29 +279,8 @@ void PyFuncCpuKernelMod::BuildFuncInfo(const PrimitivePtr &primitive, const std:
std::vector<std::vector<int64_t>> in_shapes;
std::vector<std::vector<int64_t>> out_shapes;
if (primitive->HasAttr("in_types")) {
const auto &in_type_ptrs = GetValue<std::vector<TypePtr>>(primitive->GetAttr("in_types"));
(void)std::for_each(in_type_ptrs.begin(), in_type_ptrs.end(), [&in_types](auto p) {
MS_EXCEPTION_IF_NULL(p);
(void)in_types.emplace_back(p->type_id());
});
} else {
for (size_t i = 0; i < inputs.size(); ++i) {
in_types.emplace_back(inputs[i]->dtype_id());
}
}
if (primitive->HasAttr("out_types")) {
const auto &out_type_ptrs = GetValue<std::vector<TypePtr>>(primitive->GetAttr("out_types"));
(void)std::for_each(out_type_ptrs.begin(), out_type_ptrs.end(), [&out_types](auto p) {
MS_EXCEPTION_IF_NULL(p);
(void)out_types.emplace_back(p->type_id());
});
} else {
for (size_t i = 0; i < outputs.size(); ++i) {
out_types.emplace_back(outputs[i]->dtype_id());
}
}
GetTypeInfo(primitive, inputs, "in_types", &in_types);
GetTypeInfo(primitive, inputs, "out_types", &out_types);
if (primitive->HasAttr("in_shapes")) {
in_shapes = GetValue<std::vector<std::vector<int64_t>>>(primitive->GetAttr("in_shapes"));

View File

@@ -211,16 +211,16 @@ void SparseFillEmptyRowsCpuKernelMod::UpdateOutputShapeAndSize(const std::vector
outputs[kIndex2]->SetShapeVector(out_empty_row_indicator_shape_);
outputs[kIndex3]->SetShapeVector(out_reverse_index_shape_);
size_t out_indice_batch =
std::accumulate(out_indice_shape.cbegin(), out_indice_shape.cend(), 1, std::multiplies<size_t>());
LongToSize(std::accumulate(out_indice_shape.cbegin(), out_indice_shape.cend(), 1, std::multiplies<int64_t>()));
auto out_indice_dtype_size = GetTypeByte(TypeIdToType(output_indices_type_));
size_t out_values_batch =
std::accumulate(out_values_shape.cbegin(), out_values_shape.cend(), 1, std::multiplies<size_t>());
LongToSize(std::accumulate(out_values_shape.cbegin(), out_values_shape.cend(), 1, std::multiplies<int64_t>()));
auto out_values_dtype_size = GetTypeByte(TypeIdToType(output_values_type_));
size_t out_empty_row_indicator_batch = std::accumulate(
out_empty_row_indicator_shape_.cbegin(), out_empty_row_indicator_shape_.cend(), 1, std::multiplies<size_t>());
size_t out_empty_row_indicator_batch = LongToSize(std::accumulate(
out_empty_row_indicator_shape_.cbegin(), out_empty_row_indicator_shape_.cend(), 1, std::multiplies<int64_t>()));
auto out_empty_row_indicator_dtype_size = GetTypeByte(TypeIdToType(output_empty_row_indicator_type_));
size_t out_reverse_index_batch =
std::accumulate(out_reverse_index_shape_.cbegin(), out_reverse_index_shape_.cend(), 1, std::multiplies<size_t>());
size_t out_reverse_index_batch = LongToSize(
std::accumulate(out_reverse_index_shape_.cbegin(), out_reverse_index_shape_.cend(), 1, std::multiplies<int64_t>()));
auto out_reverse_index_dtype_size = GetTypeByte(TypeIdToType(output_reverse_index_type_));
outputs[kIndex0]->set_size(out_indice_batch * out_indice_dtype_size);
outputs[kIndex1]->set_size(out_values_batch * out_values_dtype_size);

View File

@@ -1364,6 +1364,7 @@ void KernelRuntime::GenKernelTensorLaunchArgs(const CNodePtr &cnode, std::vector
#endif
for (auto index : clean_output_indexes) {
auto device_address = AnfAlgo::GetOutputAddr(pre_node, index);
MS_EXCEPTION_IF_NULL(device_address);
const auto &input = device_address->kernel_tensor();
MS_EXCEPTION_IF_NULL(input);
if (mem_scheduler != nullptr) {

View File

@@ -14,7 +14,6 @@
# ============================================================================
"""Operators for array."""
# pylint: disable=unused-import
import copy
import itertools
import numbers