!39021 Master static code clean

Merge pull request !39021 from chenfei_mindspore/master
i-robot 2022-07-30 09:46:51 +00:00 committed by Gitee
commit f303ca2a14
No known key found for this signature in database
GPG Key ID: 173E9B9CA92EEF8F
21 changed files with 96 additions and 98 deletions

View File

@ -721,7 +721,7 @@ EvaluatorPtr AnalysisEngine::HandleNestedRecursion(const std::vector<EvaluatorPt
// Check whether sub loop has untraced undetermined evaluator.
mindspore::HashSet<EvaluatorArgs, EvaluatorArgsHasher, EvaluatorArgsEqual> undetermined_evals;
for (auto r_it = eval_trace_.crbegin(); r_it != latest_entry_iter; r_it++) {
- undetermined_evals.insert(*r_it);
+ (void)undetermined_evals.insert(*r_it);
}
MS_LOG(DEBUG) << "undetermined_evals size(): " << undetermined_evals.size();

View File

@ -18,6 +18,7 @@
#include <set>
#include <map>
#include <functional>
+ #include <tuple>
#include "plugin/device/cpu/hal/device/cpu_device_address.h"
#include "mindspore/core/ops/grad/deformable_offsets_grad.h"
@ -106,26 +107,55 @@ struct OffsetIndex {
};
struct InputXIndex {
- float i;
- float j;
+ size_t i;
+ size_t j;
};
} // namespace
std::mutex mutex_;
template <typename T>
- void MsAtomicAdd(T *output_grad_x, size_t output_grad_base_pos, T added_value) {
+ void MsAtomicAdd(T *output_grad_x, const size_t &output_grad_base_pos, const T &added_value) {
std::lock_guard<std::mutex> lock(mutex_);
output_grad_x[output_grad_base_pos] += added_value;
}
+ inline std::tuple<size_t, size_t, size_t> CalPosition(const OffsetIndex &offset_index,
+ const OffsetStride &offset_stride, const GradStride &grad_stride,
+ const InputXStride &input_x_stride) {
+ const size_t offset_index_base_pos =
+ offset_index.n_i * offset_stride.n_stride +
+ offset_index.deformable_group_i * offset_stride.deformable_group_stride +
+ offset_index.kernel_i * offset_stride.kernel_h_stride + offset_index.kernel_j * offset_stride.kernel_w_stride +
+ offset_index.offset_i * offset_stride.offset_h_stride + offset_index.offset_j * offset_stride.offset_w_stride;
+ const size_t input_grad_base_pos =
+ offset_index.n_i * grad_stride.n_stride + offset_index.offset_i * grad_stride.offset_h_stride +
+ offset_index.offset_j * grad_stride.offset_w_stride + offset_index.kernel_i * grad_stride.kernel_h_stride +
+ offset_index.kernel_j * grad_stride.kernel_w_stride +
+ offset_index.deformable_group_i * grad_stride.deformable_group_stride;
+ const size_t input_x_base_pos = offset_index.n_i * input_x_stride.n_stride +
+ offset_index.deformable_group_i * input_x_stride.deformable_group_stride;
+ return {offset_index_base_pos, input_grad_base_pos, input_x_base_pos};
+ }
+ inline InputXIndex CalInputXIndex(const OffsetIndex &offset_index, const DeformableOffsetGradDims &dims) {
+ InputXIndex input_x_index;
+ input_x_index.i = dims.pad_top;
+ input_x_index.j = dims.pad_left;
+ input_x_index.i += offset_index.offset_i * dims.stride_h + offset_index.kernel_i * dims.dilation_h;
+ input_x_index.j += offset_index.offset_j * dims.stride_w + offset_index.kernel_j * dims.dilation_w;
+ return input_x_index;
+ }
template <typename T>
void DeformableOffsetGradKernel(const OffsetIndex &offset_index, const OffsetStride &offset_stride,
const GradStride &grad_stride, const DeformableOffsetGradDims &dims,
- const InputXIndex &input_x_index, const InputXStride &input_x_stride,
- const size_t &offset_index_base_pos, const size_t &input_grad_base_pos,
- const size_t &input_x_base_pos, T *input_x, T *input_offset, T *input_grad,
- T *output_grad_x, T *output_grad_offset) {
+ const InputXStride &input_x_stride, const T *input_x, const T *input_offset,
+ const T *input_grad, T *output_grad_x, T *output_grad_offset) {
+ const auto [offset_index_base_pos, input_grad_base_pos, input_x_base_pos] =
+ CalPosition(offset_index, offset_stride, grad_stride, input_x_stride);
+ const auto input_x_index = CalInputXIndex(offset_index, dims);
const size_t offset_index_i = offset_index_base_pos + offset_stride.position_stride;
const size_t offset_index_weight = offset_index_base_pos + 2 * offset_stride.position_stride;
float offset_i = static_cast<float>(input_offset[offset_index_i]);
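The hunk above factors the three base-position computations out of DeformableOffsetGradKernel into a CalPosition helper that returns a std::tuple (hence the new #include <tuple>), unpacked at the call site with C++17 structured bindings. A simplified sketch of that shape, with a made-up Strides struct standing in for the real OffsetStride/GradStride/InputXStride types:

#include <cstddef>
#include <cstdio>
#include <tuple>

struct Strides {
  std::size_t offset_stride;
  std::size_t grad_stride;
  std::size_t x_stride;
};

// Compute several related base positions in one place and return them
// together, roughly as CalPosition does for the gradient kernel.
inline std::tuple<std::size_t, std::size_t, std::size_t> CalBasePositions(std::size_t n, const Strides &s) {
  return {n * s.offset_stride, n * s.grad_stride, n * s.x_stride};
}

int main() {
  const Strides s{8, 4, 2};
  // Structured bindings unpack the tuple at the call site.
  const auto [offset_pos, grad_pos, x_pos] = CalBasePositions(3, s);
  std::printf("%zu %zu %zu\n", offset_pos, grad_pos, x_pos);
  return 0;
}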
@ -137,10 +167,10 @@ void DeformableOffsetGradKernel(const OffsetIndex &offset_index, const OffsetStr
float ceil_offset_i = floor_offset_i + 1;
float ceil_offset_j = floor_offset_j + 1;
- float floor_i = input_x_index.i + floor_offset_i;
- float floor_j = input_x_index.j + floor_offset_j;
- float ceil_i = input_x_index.i + ceil_offset_i;
- float ceil_j = input_x_index.j + ceil_offset_j;
+ float floor_i = SizeToFloat(input_x_index.i) + floor_offset_i;
+ float floor_j = SizeToFloat(input_x_index.j) + floor_offset_j;
+ float ceil_i = SizeToFloat(input_x_index.i) + ceil_offset_i;
+ float ceil_j = SizeToFloat(input_x_index.j) + ceil_offset_j;
float ceil_weight_i = offset_i + 1 - ceil_offset_i;
float ceil_weight_j = offset_j + 1 - ceil_offset_j;
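With InputXIndex members now size_t rather than float, the mixed arithmetic above converts explicitly before adding the float offsets. A tiny sketch of the idea with a hypothetical ToFloat helper (the SizeToFloat utility used above is assumed to do essentially this explicit cast):

#include <cstddef>
#include <cstdio>

// Hypothetical helper: make the unsigned-to-float conversion visible instead
// of relying on implicit promotion in mixed size_t/float expressions.
inline float ToFloat(std::size_t u) { return static_cast<float>(u); }

int main() {
  const std::size_t base_i = 5;
  const float floor_offset_i = -1.5f;
  const float floor_i = ToFloat(base_i) + floor_offset_i;
  std::printf("%.1f\n", floor_i);  // prints 3.5
  return 0;
}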
@ -276,28 +306,7 @@ void DeformableOffsetsGradCpuKernelMod::DeformableOffsetGradNHWCKernel(size_t nu
offset_index.offset_i = tmp % dims.offset_h;
offset_index.n_i = tmp / dims.offset_h;
- const size_t offset_index_base_pos =
- offset_index.n_i * offset_stride.n_stride +
- offset_index.deformable_group_i * offset_stride.deformable_group_stride +
- offset_index.kernel_i * offset_stride.kernel_h_stride + offset_index.kernel_j * offset_stride.kernel_w_stride +
- offset_index.offset_i * offset_stride.offset_h_stride + offset_index.offset_j * offset_stride.offset_w_stride;
- const size_t input_grad_base_pos =
- offset_index.n_i * grad_stride.n_stride + offset_index.offset_i * grad_stride.offset_h_stride +
- offset_index.offset_j * grad_stride.offset_w_stride + offset_index.kernel_i * grad_stride.kernel_h_stride +
- offset_index.kernel_j * grad_stride.kernel_w_stride +
- offset_index.deformable_group_i * grad_stride.deformable_group_stride;
- const size_t input_x_base_pos = offset_index.n_i * input_x_stride.n_stride +
- offset_index.deformable_group_i * input_x_stride.deformable_group_stride;
- InputXIndex input_x_index;
- input_x_index.i = -1.0 * dims.pad_top;
- input_x_index.j = -1.0 * dims.pad_left;
- input_x_index.i += offset_index.offset_i * dims.stride_h + offset_index.kernel_i * dims.dilation_h;
- input_x_index.j += offset_index.offset_j * dims.stride_w + offset_index.kernel_j * dims.dilation_w;
- DeformableOffsetGradKernel(offset_index, offset_stride, grad_stride, dims, input_x_index, input_x_stride,
- offset_index_base_pos, input_grad_base_pos, input_x_base_pos, input_x, input_offset,
+ DeformableOffsetGradKernel(offset_index, offset_stride, grad_stride, dims, input_x_stride, input_x, input_offset,
input_grad, output_grad_x, output_grad_offset);
}
};
@ -351,27 +360,7 @@ void DeformableOffsetsGradCpuKernelMod::DeformableOffsetGradNCHWKernel(size_t nu
offset_index.deformable_group_i = tmp % dims.deformable_group;
offset_index.n_i = tmp / dims.deformable_group;
- const size_t offset_index_base_pos =
- offset_index.n_i * offset_stride.n_stride +
- offset_index.deformable_group_i * offset_stride.deformable_group_stride +
- offset_index.kernel_i * offset_stride.kernel_h_stride + offset_index.kernel_j * offset_stride.kernel_w_stride +
- offset_index.offset_i * offset_stride.offset_h_stride + offset_index.offset_j * offset_stride.offset_w_stride;
- const size_t input_grad_base_pos =
- offset_index.n_i * grad_stride.n_stride + offset_index.offset_i * grad_stride.offset_h_stride +
- offset_index.offset_j * grad_stride.offset_w_stride + offset_index.kernel_i * grad_stride.kernel_h_stride +
- offset_index.kernel_j * grad_stride.kernel_w_stride +
- offset_index.deformable_group_i * grad_stride.deformable_group_stride;
- const size_t input_x_base_pos = offset_index.n_i * input_x_stride.n_stride +
- offset_index.deformable_group_i * input_x_stride.deformable_group_stride;
- InputXIndex input_x_index;
- input_x_index.i = -1.0 * dims.pad_top;
- input_x_index.j = -1.0 * dims.pad_left;
- input_x_index.i += offset_index.offset_i * dims.stride_h + offset_index.kernel_i * dims.dilation_h;
- input_x_index.j += offset_index.offset_j * dims.stride_w + offset_index.kernel_j * dims.dilation_w;
- DeformableOffsetGradKernel(offset_index, offset_stride, grad_stride, dims, input_x_index, input_x_stride,
- offset_index_base_pos, input_grad_base_pos, input_x_base_pos, input_x, input_offset,
+ DeformableOffsetGradKernel(offset_index, offset_stride, grad_stride, dims, input_x_stride, input_x, input_offset,
input_grad, output_grad_x, output_grad_offset);
}
};
@ -401,7 +390,7 @@ bool DeformableOffsetsGradCpuKernelMod::Launch(const std::vector<kernel::Address
template <typename T>
bool DeformableOffsetsGradCpuKernelMod::LaunchKernel(const std::vector<kernel::AddressPtr> &inputs,
- const std::vector<kernel::AddressPtr> &workspace,
+ const std::vector<kernel::AddressPtr> &,
const std::vector<kernel::AddressPtr> &outputs) {
const size_t num_kernels =
dims_.x_n * dims_.offset_h * dims_.offset_w * dims_.kernel_h * dims_.kernel_w * dims_.deformable_group;
@ -451,7 +440,7 @@ int DeformableOffsetsGradCpuKernelMod::Resize(const BaseOperatorPtr &base_operat
<< ", but get " << input_size_list_.size() << " and " << output_size_list_.size();
return KRET_RESIZE_FAILED;
}
- SetDims(base_operator, inputs, outputs);
+ SetDims(base_operator, inputs);
return KRET_OK;
}
@ -490,8 +479,7 @@ void DeformableOffsetsGradCpuKernelMod::CheckInOutNum(size_t inputs_num, size_t
}
void DeformableOffsetsGradCpuKernelMod::SetDims(const BaseOperatorPtr &base_operator,
- const std::vector<KernelTensorPtr> &inputs,
- const std::vector<KernelTensorPtr> &outputs) {
+ const std::vector<KernelTensorPtr> &inputs) {
auto kernel_ptr = std::dynamic_pointer_cast<ops::DeformableOffsetsGrad>(base_operator);
if (kernel_ptr == nullptr) {
MS_LOG(EXCEPTION) << "Cast DeformableOffsetsGrad failed!";

View File

@ -31,29 +31,31 @@ namespace mindspore {
namespace kernel {
using OpsDeformableOffsetsGradPtr = std::shared_ptr<ops::DeformableOffsetsGrad>;
struct DeformableOffsetGradDims {
- size_t x_n;
- size_t x_h;
- size_t x_w;
- size_t offset_h;
- size_t offset_w;
- size_t grad_h;
- size_t grad_w;
- size_t kernel_h;
- size_t kernel_w;
- size_t pad_top;
- size_t pad_left;
- size_t stride_h;
- size_t stride_w;
- size_t dilation_h;
- size_t dilation_w;
- size_t deformable_group;
- size_t deformable_group_channel;
+ size_t x_n = 0;
+ size_t x_h = 0;
+ size_t x_w = 0;
+ size_t offset_h = 0;
+ size_t offset_w = 0;
+ size_t grad_h = 0;
+ size_t grad_w = 0;
+ size_t kernel_h = 0;
+ size_t kernel_w = 0;
+ size_t pad_top = 0;
+ size_t pad_left = 0;
+ size_t stride_h = 0;
+ size_t stride_w = 0;
+ size_t dilation_h = 0;
+ size_t dilation_w = 0;
+ size_t deformable_group = 0;
+ size_t deformable_group_channel = 0;
};
class DeformableOffsetsGradCpuKernelMod : public NativeCpuKernelMod,
public MatchKernelHelper<DeformableOffsetsGradCpuKernelMod> {
public:
- DeformableOffsetsGradCpuKernelMod() { ResetResource(); }
+ DeformableOffsetsGradCpuKernelMod() : deformable_kernel_operator_(nullptr), data_format_(kOpFormat_NCHW) {
+ ResetResource();
+ }
~DeformableOffsetsGradCpuKernelMod() override = default;
bool Init(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
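Two initialization habits show up in this header change: every DeformableOffsetGradDims field gets an in-class default of 0, and the constructor now initializes its members in an initializer list before calling ResetResource(). A compact sketch of both, with placeholder names only:

#include <cstddef>
#include <cstdio>
#include <string>

// In-class default member initializers: the struct never carries
// indeterminate values, even if a caller forgets to fill a field.
struct Dims {
  std::size_t kernel_h = 0;
  std::size_t kernel_w = 0;
};

class GradKernel {
 public:
  // Members are set in the initializer list, before the constructor body runs.
  GradKernel() : op_name_("DeformableOffsetsGrad"), data_format_("NCHW") {}
  const std::string &format() const { return data_format_; }

 private:
  std::string op_name_;
  std::string data_format_;
  Dims dims_;
};

int main() {
  GradKernel k;
  Dims d;
  std::printf("%s %zu\n", k.format().c_str(), d.kernel_h);
  return 0;
}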
@ -76,11 +78,10 @@ class DeformableOffsetsGradCpuKernelMod : public NativeCpuKernelMod,
void GetDataFormat();
- void SetDims(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
- const std::vector<KernelTensorPtr> &outputs);
+ void SetDims(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs);
template <typename T>
- bool LaunchKernel(const std::vector<kernel::AddressPtr> &inputs, const std::vector<kernel::AddressPtr> &workspace,
+ bool LaunchKernel(const std::vector<kernel::AddressPtr> &inputs, const std::vector<kernel::AddressPtr> &,
const std::vector<kernel::AddressPtr> &outputs);
template <typename T>
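LaunchKernel no longer touches its workspace argument, so the parameter is left unnamed: the call signature stays compatible with how the kernel is invoked, but there is no unused-parameter warning. A minimal sketch of the idiom with stand-in types:

#include <cstdio>
#include <vector>

// The second parameter is required by the assumed interface but unused, so it
// stays unnamed; callers still pass it, the body simply never reads it.
bool LaunchSum(const std::vector<int> &inputs, const std::vector<int> & /* workspace */,
               std::vector<int> *outputs) {
  int total = 0;
  for (int v : inputs) {
    total += v;
  }
  outputs->push_back(total);
  return true;
}

int main() {
  std::vector<int> out;
  (void)LaunchSum({1, 2, 3}, {}, &out);
  std::printf("%d\n", out.front());
  return 0;
}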
@ -89,8 +90,6 @@ class DeformableOffsetsGradCpuKernelMod : public NativeCpuKernelMod,
template <typename T>
void DeformableOffsetGradNCHWKernel(size_t num_kernels, const DeformableOffsetGradDims &dims, T *input_x,
T *input_offset, T *input_grad, T *output_grad_x, T *output_grad_offset);
- std::string kernel_name_;
OpsDeformableOffsetsGradPtr deformable_kernel_operator_;
- std::string data_format_ = kOpFormat_NCHW;
DeformableOffsetGradDims dims_;

View File

@ -1,5 +1,5 @@
/**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -129,6 +129,7 @@ PrimitivePy &PrimitivePy::operator=(const PrimitivePy &other) {
if (this == &other) {
return *this;
}
+ Primitive::operator=(other);
python_obj_ = other.python_obj_;
bprop_cls_name_ = other.bprop_cls_name_;
adapter_ = other.adapter_;
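The added Primitive::operator=(other) call is the substance of this hunk: a user-defined copy assignment in a derived class must forward to the base class explicitly, otherwise the base subobject is silently left unchanged. A small sketch of the pitfall and the fix, with generic names rather than the real PrimitivePy/Primitive classes:

#include <cstdio>
#include <string>

class Base {
 public:
  std::string name_;
};

class Derived : public Base {
 public:
  Derived &operator=(const Derived &other) {
    if (this == &other) {
      return *this;
    }
    Base::operator=(other);  // without this line, name_ would never be copied
    tag_ = other.tag_;
    return *this;
  }
  std::string tag_;
};

int main() {
  Derived a;
  a.name_ = "prim";
  a.tag_ = "py";
  Derived b;
  b = a;
  std::printf("%s %s\n", b.name_.c_str(), b.tag_.c_str());
  return 0;
}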
@ -173,9 +174,6 @@ py::function PrimitivePy::GetBpropFunction() {
if (py::hasattr(python_obj_, get_bprop_func_name)) {
py::function fn = python_obj_.attr(get_bprop_func_name)().cast<py::function>();
return fn;
- } else {
- auto fn = GetBpropFunctionByObj(python_obj_);
- return fn;
}
auto fn = GetBpropFunctionByObj(python_obj_);
return fn;

View File

@ -42,7 +42,7 @@ class PrimitivePy : public Primitive {
public:
explicit PrimitivePy(const std::string &name);
PrimitivePy(const PrimitivePy &prim_py);
- PrimitivePy &operator=(const PrimitivePy &other);
+ virtual PrimitivePy &operator=(const PrimitivePy &other);
PrimitivePy(const py::object &python_obj, const PrimitivePyAdapterPtr &adapter);
~PrimitivePy() override;
MS_DECLARE_PARENT(PrimitivePy, Primitive);

View File

@ -55,7 +55,7 @@ class MS_CORE_API Named : public Value {
///
/// \param[in] other An existing Named object.
/// \return A Named object set with the same type, name and hash_id as other.
- Named &operator=(const Named &other) {
+ virtual Named &operator=(const Named &other) {
if (&other != this) {
this->type_ = other.type_;
this->name_ = other.name_;

View File

@ -55,7 +55,7 @@ class MS_CORE_API Primitive : public Named {
///
/// \param[in] other An existing Primitive object.
/// \return A Primitive object set with the same members as other.
- Primitive &operator=(const Primitive &other);
+ virtual Primitive &operator=(const Primitive &other);
MS_DECLARE_PARENT(Primitive, Named);
abstract::AbstractBasePtr ToAbstract() override;
abstract::AbstractBasePtr ToPrimAbstract(const AnfNodePtr &anf_node);

View File

@ -64,7 +64,7 @@ AbstractBasePtr DeformableOffsetsGradInfer(const abstract::AnalysisEnginePtr &,
MS_EXCEPTION_IF_NULL(primitive);
auto prim_name = primitive->name();
// check inputs num.
- (void)CheckAndConvertUtils::CheckInputArgs(input_args, kEqual, kDeformableOffsetsGradInputSize, prim_name);
+ CheckAndConvertUtils::CheckInputArgs(input_args, kEqual, kDeformableOffsetsGradInputSize, prim_name);
auto out_grad_types = DeformableOffsetsGradInferType(primitive, input_args);
auto out_grad_shapes = DeformableOffsetsGradInferShape(primitive, input_args);
std::vector<abstract::AbstractBasePtr> out_grads_abs;

View File

@ -17,6 +17,7 @@ Extension functions.
Python functions that will be called in the c++ parts of MindSpore.
"""
+ from __future__ import absolute_import
from .utils import cell_attr_register
__all__ = ["cell_attr_register"]

View File

@ -13,6 +13,7 @@
# limitations under the License.
# ============================================================================
"""builtin_operations"""
+ from __future__ import absolute_import
import numpy as np
from mindspore.ops import functional as F
from mindspore.ops import composite as C

View File

@ -16,6 +16,7 @@
Interfaces for parser module in c++.
"""
+ from __future__ import absolute_import
from .parser import (Parser, create_instance, is_supported_create_instance_type, generate_scope,
get_bprop_method_of_class, get_class_instance_type, get_class_member_namespace_symbol,
create_slice_obj, get_obj_id, get_module_namespace, get_obj_type, get_object_key,

View File

@ -16,6 +16,7 @@
# ============================================================================
"""Define the namespace of parse."""
+ from __future__ import absolute_import
import builtins
from mindspore import log as logger

View File

@ -16,6 +16,7 @@
# ============================================================================
"""The module of parser python object, called by c++."""
+ from __future__ import absolute_import
import os
import sys
import ast

View File

@ -15,6 +15,8 @@
# limitations under the License.
# ============================================================================
"""Resources for ast tree parse."""
+ from __future__ import absolute_import
import ast
import math

View File

@ -16,6 +16,7 @@
# ============================================================================
"""standard_method"""
+ from __future__ import absolute_import
from mindspore import Tensor, CSRTensor, COOTensor, ms_class
from mindspore import dtype as mstype
@ -256,7 +257,7 @@ def strides_(x):
return strides
- def astype(x, dtype, copy=True): # pylint: disable=redefined-outer-name
+ def astype(x, dtype, copy_tensor=True):
"""
Return a copy of the tensor, casted to a specified type.
@ -264,7 +265,7 @@ def astype(x, dtype, copy=True): # pylint: disable=redefined-outer-name
dtype (Union[:class:`mindspore.dtype`, str]): Designated tensor dtype, can be in format
of :class:`mindspore.dtype.float32` or `float32`.
Default: :class:`mindspore.dtype.float32`.
- copy (bool, optional): By default, astype always returns a newly allocated
+ copy_tensor (bool, optional): By default, astype always returns a newly allocated
tensor. If this is set to false, the input tensor is returned instead
of a copy if possible. Default: True.
@ -286,7 +287,7 @@ def astype(x, dtype, copy=True): # pylint: disable=redefined-outer-name
Int32
"""
dtype = check_astype_dtype_const(dtype)
- if not copy and dtype == x.dtype:
+ if not copy_tensor and dtype == x.dtype:
return x
return F.cast(x, dtype)
@ -1843,11 +1844,12 @@ def masked_fill(x, mask, value):
return F.masked_fill(x, mask, value)
- def col2im(input_x, output_size, kernel_size, dilation, padding_value, stride):
+ def col2im(*inputs):
"""
+ inputs: input_x, output_size, kernel_size, dilation, padding_value, stride
Combines an array of sliding local blocks into a large containing tensor.
"""
- return F.col2im(input_x, output_size, kernel_size, dilation, padding_value, stride)
+ return F.col2im(*inputs)
def narrow(x, axis, start, length):
@ -2420,9 +2422,8 @@ def list_hasnext(xs):
return len(xs) > 0
- # pylint: disable=redefined-outer-name
- def list_append(self_, item):
- return _append(self_, item)
+ def list_append(self_, list_item):
+ return _append(self_, list_item)
def list_insert(self_, index, obj):

View File

@ -19,6 +19,7 @@
# This operation function is not meant to be called directly
# support operator symbol, ast
+ from __future__ import absolute_import
from operator import ( # noqa
add, sub, mul, truediv, floordiv, mod, eq, ne, lt, gt, le, ge, pos, neg,
not_, and_, or_, xor, lshift, rshift, invert, is_, is_not, contains,

View File

@ -14,6 +14,7 @@
# ============================================================================
"""Some utils."""
+ from __future__ import absolute_import
import inspect
from functools import wraps

View File

@ -13,6 +13,7 @@
# limitations under the License.
# ============================================================================
"""Patterns for describing graphs"""
+ from __future__ import absolute_import
from mindspore.ops import Primitive
from mindspore.common.tensor import Tensor
from mindspore._c_expression import Pattern, OneOf_, Prim_, Call_, NoneOf_, Any, NewTensor_, NewParameter_, Imm

View File

@ -13,6 +13,7 @@
# limitations under the License.
# ============================================================================
"""Reference for python pass registration."""
+ from __future__ import absolute_import
from .python_pass_register import register_pass, unregister_pass, gen_new_parameter, cancel_new_parameter, set_renorm,\
set_reopt

View File

@ -13,6 +13,7 @@
# limitations under the License.
# ============================================================================
"""Python pass register"""
+ from __future__ import absolute_import
from inspect import isfunction
from mindspore.graph_utils.graph_pattern import Pattern, NewParameter
from mindspore._c_expression import PyPassManager_