clean code warnings

fan-jibin 2022-07-26 14:55:56 +08:00
parent 938ec236e8
commit 6d89900f4b
25 changed files with 31 additions and 43 deletions

View File

@@ -72,10 +72,10 @@ bool CheckValidCpuKernelMod::LaunchKernel(const std::vector<kernel::AddressPtr>
const size_t right_y = i * 4 + 3;
bool valid_flag = false;
- valid_flag |= std::less<T>()(anchor_box[left_x], ZERO);
- valid_flag |= std::less<T>()(anchor_box[left_y], ZERO);
- valid_flag |= std::less<double>()(img_width_x, static_cast<double>(anchor_box[right_x]));
- valid_flag |= std::less<double>()(img_height_y, static_cast<double>(anchor_box[right_y]));
+ valid_flag = valid_flag || std::less<T>()(anchor_box[left_x], ZERO);
+ valid_flag = valid_flag || std::less<T>()(anchor_box[left_y], ZERO);
+ valid_flag = valid_flag || std::less<double>()(img_width_x, static_cast<double>(anchor_box[right_x]));
+ valid_flag = valid_flag || std::less<double>()(img_height_y, static_cast<double>(anchor_box[right_y]));
output[i] = !valid_flag;
}
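Rewriting valid_flag |= expr as valid_flag = valid_flag || expr addresses a common static-analysis warning: the bitwise compound assignment promotes its bool operands to int and always evaluates the right-hand side, while the logical form stays in bool and short-circuits once the flag is already true. A small self-contained sketch of the two forms (generic values, not the kernel's real inputs):

#include <functional>
#include <iostream>

int main() {
  const double box_left = -1.0, img_width = 32.0, box_right = 40.0;
  // Old style: |= promotes bool to int and evaluates every operand.
  bool invalid_bitwise = false;
  invalid_bitwise |= std::less<double>()(box_left, 0.0);
  invalid_bitwise |= std::less<double>()(img_width, box_right);
  // New style: logical OR keeps bool semantics and short-circuits.
  bool invalid_logical = false;
  invalid_logical = invalid_logical || std::less<double>()(box_left, 0.0);
  invalid_logical = invalid_logical || std::less<double>()(img_width, box_right);
  std::cout << invalid_bitwise << " " << invalid_logical << "\n";  // prints "1 1"
  return 0;
}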

View File

@@ -79,7 +79,7 @@ bool DropoutGradBwdCpuKernelMod::Launch(const std::vector<AddressPtr> &inputs, c
template <typename T>
void DropoutGradBwdCpuKernelMod::DropoutBackwardKernel(const std::vector<AddressPtr> &inputs,
const std::vector<AddressPtr> &workspace,
- const std::vector<AddressPtr> &outputs, float keep_prob) {
+ const std::vector<AddressPtr> &outputs, float keep_prob) const {
auto *output = reinterpret_cast<T *>(outputs[0]->addr);
const auto *input = reinterpret_cast<T *>(inputs[0]->addr);
const auto *mask = reinterpret_cast<T *>(inputs[1]->addr);

View File

@@ -39,7 +39,7 @@ class DropoutGradBwdCpuKernelMod : public DeprecatedNativeCpuKernelMod {
private:
template <typename T>
void DropoutBackwardKernel(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
- const std::vector<AddressPtr> &outputs, float keep_prob);
+ const std::vector<AddressPtr> &outputs, float keep_prob) const;
float keep_prob_{1.0};
size_t num_count_{1};
TypeId dtype_{kTypeUnknown};
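Both the definition and the declaration of DropoutBackwardKernel gain a const qualifier because the helper only reads member state; checkers such as clang-tidy's readability-make-member-function-const flag member functions that could be const but are not. A minimal sketch of the idea, using hypothetical names rather than the real kernel class:

#include <cstddef>
#include <vector>

class ScaleKernel {
 public:
  explicit ScaleKernel(float keep_prob) : keep_prob_(keep_prob) {}
  // const: the method reads keep_prob_ but never mutates the object,
  // so it can also be called through a const ScaleKernel.
  void Apply(const std::vector<float> &in, std::vector<float> *out) const {
    out->resize(in.size());
    for (size_t i = 0; i < in.size(); ++i) {
      (*out)[i] = in[i] / keep_prob_;
    }
  }

 private:
  float keep_prob_{1.0f};
};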

View File

@@ -51,9 +51,9 @@ int MaskedFillCpuKernelMod::Resize(const BaseOperatorPtr &base_operator, const s
std::vector<int64_t> mask_shape = inputs.at(kIndex1)->GetShapeVector();
std::vector<int64_t> value_shape = inputs.at(kIndex2)->GetShapeVector();
std::vector<int64_t> output_shape = outputs.at(kIndex0)->GetShapeVector();
- std::transform(input_shape.begin(), input_shape.end(), std::back_inserter(input_shape_), LongToSize);
- std::transform(mask_shape.begin(), mask_shape.end(), std::back_inserter(mask_shape_), LongToSize);
- std::transform(output_shape.begin(), output_shape.end(), std::back_inserter(output_shape_), LongToSize);
+ (void)std::transform(input_shape.begin(), input_shape.end(), std::back_inserter(input_shape_), LongToSize);
+ (void)std::transform(mask_shape.begin(), mask_shape.end(), std::back_inserter(mask_shape_), LongToSize);
+ (void)std::transform(output_shape.begin(), output_shape.end(), std::back_inserter(output_shape_), LongToSize);
need_broadcast_ = (input_shape_ == mask_shape_) ? false : true;
size_t batch_size = value_shape.size();
if (input_shape.size() < batch_size || mask_shape.size() < batch_size) {
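std::transform returns the output iterator, and the lint profile used here treats silently dropped return values as suspicious, so the explicit (void) cast documents that the iterator is discarded on purpose. A standalone sketch of the idiom, with a lambda standing in for the LongToSize helper:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <vector>

int main() {
  const std::vector<int64_t> shape = {2, 3, 4};
  std::vector<size_t> shape_sizes;
  // The cast marks the returned output iterator as intentionally unused.
  (void)std::transform(shape.begin(), shape.end(), std::back_inserter(shape_sizes),
                       [](int64_t v) { return static_cast<size_t>(v); });
  return static_cast<int>(shape_sizes.size());  // exits with status 3
}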

View File

@@ -122,7 +122,7 @@ bool AddNCpuKernelMod::LaunchKernel(const std::vector<kernel::AddressPtr> &input
return true;
}
- void AddNCpuKernelMod::CheckParam(const CNodePtr &kernel_node) {
+ void AddNCpuKernelMod::CheckParam(const CNodePtr &kernel_node) const {
auto src0_shape = common::AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
auto dst_shape = AnfAlgo::GetOutputDeviceShape(kernel_node, 0);
if (AnfAlgo::IsShapesDynamic({src0_shape, dst_shape})) {

View File

@@ -39,7 +39,7 @@ class AddNCpuKernelMod : public DeprecatedMKLCpuKernelMod {
std::vector<KernelAttr> GetOpSupport() override;
private:
- void CheckParam(const CNodePtr &kernel_node);
+ void CheckParam(const CNodePtr &kernel_node) const;
template <typename T>
bool LaunchKernel(const std::vector<kernel::AddressPtr> &inputs, const std::vector<kernel::AddressPtr> &workspace,

View File

@@ -18,7 +18,6 @@
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_CONV_CPU_KERNEL_H_
#include <vector>
#include <memory>
#include "plugin/device/cpu/kernel/mkldnn/mkl_cpu_kernel.h"

View File

@@ -19,7 +19,6 @@
#include <map>
#include <vector>
#include <memory>
#include "plugin/device/cpu/kernel/mkldnn/mkl_cpu_kernel.h"
namespace mindspore {

View File

@@ -18,7 +18,6 @@
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_LOG_SOFTMAX_GRAD_CPU_KERNEL_H_
#include <vector>
#include <memory>
#include "plugin/device/cpu/kernel/mkldnn/mkl_cpu_kernel.h"
namespace mindspore {

View File

@@ -171,7 +171,7 @@ bool LstmCpuKernelMod::Launch(const std::vector<kernel::AddressPtr> &inputs, con
reinterpret_cast<float *>(inputs[kInputWeightIndex]->addr) + weight_size_ + weight_h_size_);
} else {
auto size = GetSize(bias_desc_);
- if (memset_s(GetDataHandle(bias_memory_), size, 0, size)) {
+ if (memset_s(GetDataHandle(bias_memory_), size, 0, size) != EOK) {
MS_LOG(EXCEPTION) << "Bias memset error";
}
}
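memset_s here comes from the bounded securec functions that MindSpore links against (an assumption about the build; the header is not shown in this diff). It returns an errno_t whose success value is EOK (0), so comparing against EOK states the intent explicitly instead of relying on the implicit nonzero-means-error conversion. A reduced sketch of the pattern, returning an error code instead of raising MS_LOG(EXCEPTION):

#include <cstddef>
#include "securec.h"  // assumption: securec provides memset_s and EOK in this codebase

// Zero-fill a buffer and report failure to the caller instead of logging.
int ClearBuffer(void *dst, size_t size) {
  if (memset_s(dst, size, 0, size) != EOK) {
    return -1;  // the real kernel throws via MS_LOG(EXCEPTION) here
  }
  return 0;
}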

View File

@@ -207,7 +207,7 @@ void LSTMGradCpuKernelMod::ResetMemory(const dnnl::memory &mem, const string nam
auto dst_ptr = GetDataHandle(mem);
auto mem_desc = GetMemDesc(mem);
auto size = GetSize(mem_desc);
- if (memset_s(dst_ptr, size, 0, size)) {
+ if (memset_s(dst_ptr, size, 0, size) != EOK) {
MS_LOG(EXCEPTION) << name << " memset error";
}
}
@@ -227,7 +227,7 @@ bool LSTMGradCpuKernelMod::Launch(const std::vector<kernel::AddressPtr> &inputs,
} else {
auto dst_ptr = GetDataHandle(bias_memory_);
auto size = GetSize(bias_desc_);
- if (memset_s(dst_ptr, size, 0, size)) {
+ if (memset_s(dst_ptr, size, 0, size) != EOK) {
MS_LOG(EXCEPTION) << "Bias memset error";
}
}
@@ -245,7 +245,7 @@ bool LSTMGradCpuKernelMod::Launch(const std::vector<kernel::AddressPtr> &inputs,
}
auto dst_ptr = GetDataHandle(diff_bias_memory_);
auto size = GetSize(diff_bias_desc_);
- if (memset_s(dst_ptr, size, 0, size)) {
+ if (memset_s(dst_ptr, size, 0, size) != EOK) {
MS_LOG(EXCEPTION) << "Bias grad memset error";
}
SetArgumentHandleOp(inputs, outputs);

View File

@@ -18,9 +18,6 @@
#define MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_MKLDNN_MATMUL_CPU_KERNEL_FUNC_H_
#include <vector>
#include <memory>
#include <map>
#include <string>
#include "plugin/device/cpu/kernel/mkldnn/mkl_cpu_kernel.h"
namespace mindspore {

View File

@@ -137,7 +137,7 @@ class mkl_threadpool : public dnnl::threadpool_interop::threadpool_iface {
bool first_parallel{true};
public:
- explicit mkl_threadpool(ThreadPool *tp) { tp_ = tp; }
+ explicit mkl_threadpool(ThreadPool *tp) : tp_(tp) {}
void set_num_threads(int num) { thread_num_ = num; }
int get_num_threads() const override { return std::min(SizeToInt(tp_->GetKernelThreadNum()), thread_num_); }
bool get_in_parallel() const override { return !first_parallel; }
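Moving the tp_ assignment into the constructor's member initializer list avoids default-initializing the member and then overwriting it in the body, which is what checkers such as cppcoreguidelines-prefer-member-initializer complain about. A stripped-down sketch of the before and after shapes, with generic names:

class ThreadPool;  // opaque here; only a pointer is stored

class PoolWrapper {
 public:
  // Preferred: the member is initialized directly from the argument.
  explicit PoolWrapper(ThreadPool *tp) : tp_(tp) {}
  // Flagged equivalent: default-init tp_, then assign it in the body.
  // explicit PoolWrapper(ThreadPool *tp) { tp_ = tp; }

 private:
  ThreadPool *tp_{nullptr};
};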

View File

@@ -17,10 +17,8 @@
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_REDUCTION_CPU_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_REDUCTION_CPU_KERNEL_H_
#include <memory>
#include <vector>
#include <map>
#include <string>
#include <utility>
#include "plugin/device/cpu/kernel/mkldnn/mkl_cpu_kernel.h"

View File

@@ -18,7 +18,6 @@
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_SOFTMAX_CPU_KERNEL_H_
#include <vector>
#include <memory>
#include "plugin/device/cpu/kernel/mkldnn/mkl_cpu_kernel.h"
namespace mindspore {

View File

@@ -48,7 +48,6 @@ class BACKEND_EXPORT FusedSparseFtrlCpuKernelMod : public SparseOptimizerCpuKern
std::vector<KernelAttr> GetOpSupport() override { return OpSupport(); }
void ResetResource() noexcept;
protected:
float lr_{0.0};
float l1_{0.0};
float l2_{0.0};

View File

@@ -86,9 +86,9 @@ TuplePtr AdaptiveMaxPool3DInferType(const PrimitivePtr &primitive, const std::ve
const std::set<TypePtr> x_valid_types = {kInt8, kInt16, kInt32, kInt64, kUInt8, kUInt16,
kUInt32, kUInt64, kFloat16, kFloat32, kFloat64};
const std::set<TypePtr> output_size_valid_types = {kInt32};
- CheckAndConvertUtils::CheckTensorTypeValid("x_dtype", x_dtype, x_valid_types, kNameAdaptiveMaxPool3D);
- CheckAndConvertUtils::CheckTensorTypeValid("output_size_dtype", output_size_dtype, output_size_valid_types,
- kNameAdaptiveMaxPool3D);
+ (void)CheckAndConvertUtils::CheckTensorTypeValid("x_dtype", x_dtype, x_valid_types, kNameAdaptiveMaxPool3D);
+ (void)CheckAndConvertUtils::CheckTensorTypeValid("output_size_dtype", output_size_dtype, output_size_valid_types,
+ kNameAdaptiveMaxPool3D);
return std::make_shared<Tuple>(std::vector<TypePtr>{x_dtype, output_size_dtype});
}
} // namespace

View File

@@ -20,7 +20,6 @@
#include <algorithm>
#include <set>
#include <memory>
#include <numeric>
#include <vector>
#include "ops/base_operator.h"
#include "mindapi/base/types.h"

View File

@@ -31,7 +31,7 @@ abstract::ShapePtr GatherInferShape(const PrimitivePtr &primitive, const std::ve
MS_EXCEPTION_IF_NULL(primitive);
const std::string &op_name = primitive->name();
const int64_t input_num = 2;
- (void)CheckAndConvertUtils::CheckInputArgs(input_args, kGreaterEqual, input_num, op_name);
+ CheckAndConvertUtils::CheckInputArgs(input_args, kGreaterEqual, input_num, op_name);
abstract::AbstractTensorPtr indices =
CheckAndConvertUtils::CheckArgs<abstract::AbstractTensor>(op_name, input_args, 1);
abstract::AbstractTensorPtr params =
@@ -101,7 +101,7 @@ TypePtr GatherInferType(const PrimitivePtr &primitive, const std::vector<Abstrac
MS_EXCEPTION_IF_NULL(primitive);
const std::string &op_name = primitive->name();
constexpr int64_t input_num = 2;
- (void)CheckAndConvertUtils::CheckInputArgs(input_args, kGreaterEqual, input_num, op_name);
+ CheckAndConvertUtils::CheckInputArgs(input_args, kGreaterEqual, input_num, op_name);
std::set<TypePtr> valid_params_types = {kTensorType};
(void)CheckAndConvertUtils::CheckSubClass("params", input_args[kInputIndex0]->BuildType(), valid_params_types,
op_name);
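The Gather hunks move in the opposite direction from the (void) insertions elsewhere in this commit: the cast on CheckAndConvertUtils::CheckInputArgs is removed, which makes sense if that call produces no value worth discarding, so the cast itself draws a warning (an assumption; the function's signature is not part of this diff). A tiny illustration with a hypothetical validator:

#include <stdexcept>

// Hypothetical check that returns nothing.
void CheckArity(int got, int expected) {
  if (got < expected) throw std::invalid_argument("too few inputs");
}

int main() {
  // (void)CheckArity(2, 2);  // redundant cast: there is no return value to discard
  CheckArity(2, 2);           // preferred form
  return 0;
}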

View File

@@ -53,8 +53,8 @@ abstract::ShapePtr AdaptiveMaxPool2DGradInferShape(const PrimitivePtr &primitive
CheckAndConvertUtils::CheckInRange("x_dim", x_dims, kIncludeBoth, {3, 4}, kNameAdaptiveMaxPool2DGrad);
CheckAndConvertUtils::CheckInRange("argmax_dim", argmax_dims, kIncludeBoth, {3, 4}, kNameAdaptiveMaxPool2DGrad);
- CheckAndConvertUtils::CheckInteger("y_grad_dims", y_grad_dims, kEqual, x_dims, kNameAdaptiveMaxPool2DGrad);
- CheckAndConvertUtils::CheckInteger("argmax_dims", argmax_dims, kEqual, x_dims, kNameAdaptiveMaxPool2DGrad);
+ (void)CheckAndConvertUtils::CheckInteger("y_grad_dims", y_grad_dims, kEqual, x_dims, kNameAdaptiveMaxPool2DGrad);
+ (void)CheckAndConvertUtils::CheckInteger("argmax_dims", argmax_dims, kEqual, x_dims, kNameAdaptiveMaxPool2DGrad);
if (y_grad_shape != argmax_shape) {
MS_EXCEPTION(ValueError) << "For '" << op_name
<< "', the shape of 'y_grad' should be consistent with the shape of 'argmax'.";

View File

@@ -63,7 +63,7 @@ abstract::TupleShapePtr BatchNormGradGradInferShape(const PrimitivePtr &primitiv
const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(primitive);
auto prim_name = primitive->name();
- (void)CheckAndConvertUtils::CheckInputArgs(input_args, kEqual, kBatchNormGradGradInputsNum, prim_name);
+ CheckAndConvertUtils::CheckInputArgs(input_args, kEqual, kBatchNormGradGradInputsNum, prim_name);
BaseShapePtr x_shape = input_args[kInputIndex0]->BuildShape();
BaseShapePtr dy_shape = input_args[kInputIndex1]->BuildShape();
BaseShapePtr scale_shape = input_args[kInputIndex2]->BuildShape();

View File

@@ -16,6 +16,7 @@
#include "ops/grad/max_pool3d_grad_with_argmax.h"
#include <map>
#include <algorithm>
#include "ops/op_utils.h"
#include "mindapi/src/helper.h"
@@ -88,7 +89,7 @@ Format MaxPool3DGradWithArgmax::get_format() const {
{"NCDHW", Format::NCDHW},
};
auto attr_value_str = GetValue<std::string>(value_ptr);
- std::transform(attr_value_str.begin(), attr_value_str.end(), attr_value_str.begin(), toupper);
+ (void)std::transform(attr_value_str.begin(), attr_value_str.end(), attr_value_str.begin(), toupper);
auto iter = valid_dataformat.find(attr_value_str);
if (iter == valid_dataformat.end()) {
MS_LOG(EXCEPTION) << "Invalid format " << attr_value_str << ", use NCDHW";
@@ -123,7 +124,7 @@ abstract::AbstractBasePtr MaxPool3DGradWithArgmaxInfer(const abstract::AnalysisE
const PrimitivePtr &primitive,
const std::vector<abstract::AbstractBasePtr> &input_args) {
const int64_t input_num = 3;
- (void)CheckAndConvertUtils::CheckInputArgs(input_args, kEqual, input_num, primitive->name());
+ CheckAndConvertUtils::CheckInputArgs(input_args, kEqual, input_num, primitive->name());
auto infer_type = MaxPool3DGradWithArgmaxInferType(primitive, input_args);
auto infer_shape = MaxPool3DGradWithArgmaxInferShape(primitive, input_args);
return abstract::MakeAbstract(infer_shape, infer_type);

View File

@@ -17,7 +17,6 @@
#ifndef MINDSPORE_CORE_OPS_MAX_POOL3D_GRAD_WITH_ARGMAX_H_
#define MINDSPORE_CORE_OPS_MAX_POOL3D_GRAD_WITH_ARGMAX_H_
#include <algorithm>
#include <memory>
#include <set>
#include <string>

View File

@@ -15,7 +15,6 @@
*/
#include "ops/grad/pool_grad.h"
#include <cctype>
#include "ops/op_utils.h"
#include "mindapi/src/helper.h"
@@ -110,7 +109,7 @@ PadMode PoolGrad::get_pad_mode() const {
return PadMode(GetValue<int64_t>(value_ptr));
}
auto attr_value_str = GetValue<std::string>(value_ptr);
- std::transform(attr_value_str.begin(), attr_value_str.end(), attr_value_str.begin(), toupper);
+ (void)std::transform(attr_value_str.begin(), attr_value_str.end(), attr_value_str.begin(), toupper);
auto iter = pad_map.find(attr_value_str);
if (iter == pad_map.end()) {
MS_LOG(EXCEPTION) << "Invalid pad mode " << attr_value_str << " use CALCULATED, PAD, VALID or SAME";
@@ -125,7 +124,7 @@ Format PoolGrad::get_format() const {
return Format(GetValue<int64_t>(value_ptr));
}
auto attr_value_str = GetValue<std::string>(value_ptr);
- std::transform(attr_value_str.begin(), attr_value_str.end(), attr_value_str.begin(), toupper);
+ (void)std::transform(attr_value_str.begin(), attr_value_str.end(), attr_value_str.begin(), toupper);
auto iter = dataformat_map.find(attr_value_str);
if (iter == dataformat_map.end()) {
MS_LOG(EXCEPTION) << "Invalid format " << attr_value_str << " use NCHW, NHWC NCDHW or NDHWC";

View File

@@ -94,7 +94,7 @@ Format MaxPool3DWithArgmax::get_format() const {
{"NCDHW", Format::NCDHW},
};
auto attr_value_str = GetValue<std::string>(value_ptr);
- std::transform(attr_value_str.begin(), attr_value_str.end(), attr_value_str.begin(), toupper);
+ (void)std::transform(attr_value_str.begin(), attr_value_str.end(), attr_value_str.begin(), toupper);
auto iter = valid_dataformat.find(attr_value_str);
if (iter == valid_dataformat.end()) {
MS_LOG(EXCEPTION) << "Invalid format " << attr_value_str << ", use NCDHW";
@@ -113,7 +113,7 @@ TypeId MaxPool3DWithArgmax::get_argmax_type() const {
{"int64", TypeId::kNumberTypeInt64},
};
auto attr_value_str = GetValue<std::string>(value_ptr);
- std::transform(attr_value_str.begin(), attr_value_str.end(), attr_value_str.begin(), toupper);
+ (void)std::transform(attr_value_str.begin(), attr_value_str.end(), attr_value_str.begin(), toupper);
auto iter = valid_argmax_type.find(attr_value_str);
if (iter == valid_argmax_type.end()) {
MS_LOG(EXCEPTION) << "Invalid argmax type " << attr_value_str << ", use int64 or int32";