fix clean code & size_t bugs

jjfeing 2022-07-05 14:50:43 +08:00
parent a6ac3d6073
commit c417a292b0
7 changed files with 9 additions and 15 deletions

View File

@@ -1,5 +1,5 @@
/**
- * Copyright 2020-2022 Huawei Technologies Co., Ltd
+ * Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,7 +17,6 @@
#include "kernel/oplib/oplib.h"
#include "include/common/utils/utils.h"
#include "include/common/utils/anfalgo.h"
#include "utils/log_adapter.h"
namespace mindspore::opt {
@@ -75,7 +74,7 @@ ConstInputToAttrRegister::RegisterHelper::RegisterHelper(const string &name, con
va_list var_ptr;
va_start(var_ptr, len);
for (int i = 0; i < len; ++i) {
-input_to_attr.insert(va_arg(var_ptr, size_t));
+input_to_attr.insert(static_cast<size_t>(va_arg(var_ptr, int)));
}
va_end(var_ptr);
ConstInputToAttrRegister::GetInstance().RegConstToAttr(name, backend, is_dynamic_shape, input_to_attr);
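
A note on the size_t fix above: arguments passed through a C-style variadic `...` undergo default argument promotions, so the integer literals supplied by the registration macros arrive as `int`. Reading them back with `va_arg(var_ptr, size_t)` is therefore undefined behavior on LP64 targets, where `size_t` is 8 bytes but the promoted argument is a 4-byte `int`; the corrected line reads an `int` and widens it explicitly. A minimal self-contained sketch of the same pattern (the function and names are illustrative, not the actual MindSpore API):

#include <cstdarg>
#include <cstddef>
#include <set>

// Illustrative stand-in for RegisterHelper: callers pass plain int
// literals, e.g. CollectIndices(3, 0, 1, 2).
std::set<std::size_t> CollectIndices(int len, ...) {
  std::set<std::size_t> indices;
  va_list ap;
  va_start(ap, len);
  for (int i = 0; i < len; ++i) {
    // va_arg(ap, size_t) would be UB here: the promoted argument is an int,
    // so read an int and widen it explicitly.
    indices.insert(static_cast<std::size_t>(va_arg(ap, int)));
  }
  va_end(ap);
  return indices;
}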

View File

@@ -57,13 +57,10 @@ RER_ASCEND_STATIC_CONST_TO_ATTR(kCumprodOpName, 1);
RER_ASCEND_STATIC_CONST_TO_ATTR(kCumSumOpName, 1);
RER_ASCEND_STATIC_CONST_TO_ATTR(kDepthwiseConv2dNativeBackpropFilterOpName, 1);
RER_ASCEND_STATIC_CONST_TO_ATTR(kDepthwiseConv2dNativeBackpropInputOpName, 0);
-// RER_ASCEND_STATIC_CONST_TO_ATTR(kEmbeddingLookupCommGradOpName, 1);
-// RER_ASCEND_STATIC_CONST_TO_ATTR(kEmbeddingLookupOpName, 2, 3, 4, 5);
RER_ASCEND_STATIC_CONST_TO_ATTR(kErfOpName, 1);
RER_ASCEND_STATIC_CONST_TO_ATTR(kExpandDimsOpName, 1);
RER_ASCEND_STATIC_CONST_TO_ATTR(kEyeOpName, 0, 1, 2);
RER_ASCEND_STATIC_CONST_TO_ATTR(kFlattenGradOpName, 1);
-// RER_ASCEND_STATIC_CONST_TO_ATTR(kGatherDOpName, 1);
RER_ASCEND_STATIC_CONST_TO_ATTR(kGatherOpName, 2);
RER_ASCEND_STATIC_CONST_TO_ATTR(kMeanGradOpName, 1);
RER_ASCEND_STATIC_CONST_TO_ATTR(kOneHotOpName, 1);

View File

@@ -62,8 +62,6 @@ RER_GPU_STATIC_CONST_TO_ATTR(kCumprodOpName, 1);
RER_GPU_STATIC_CONST_TO_ATTR(kCumSumOpName, 1);
RER_GPU_STATIC_CONST_TO_ATTR(kDepthwiseConv2dNativeBackpropFilterOpName, 1);
RER_GPU_STATIC_CONST_TO_ATTR(kDepthwiseConv2dNativeBackpropInputOpName, 0);
-// RER_GPU_STATIC_CONST_TO_ATTR(kEmbeddingLookupCommGradOpName, 1);
-// RER_GPU_STATIC_CONST_TO_ATTR(kEmbeddingLookupOpName, 2, 3, 4, 5);
RER_GPU_STATIC_CONST_TO_ATTR(kErfOpName, 1);
RER_GPU_STATIC_CONST_TO_ATTR(kExpandDimsOpName, 1);
RER_GPU_STATIC_CONST_TO_ATTR(kEyeOpName, 0, 1, 2);

View File

@@ -665,7 +665,7 @@ void ConstructInputTensor(const OpExecInfoPtr &op_run_info, std::vector<int64_t>
MS_EXCEPTION_IF_NULL(tensors_mask);
MS_EXCEPTION_IF_NULL(input_tensors);
-mindspore::HashSet<size_t> input_to_attr;
+mindspore::HashSet<size_t> input_to_attr = {};
bool need_convert_input_to_attr = NeedConvertConstInputToAttr(op_run_info, &input_to_attr);
MS_LOG(DEBUG) << "Need convert input to addr " << need_convert_input_to_attr;
if (need_convert_input_to_attr) {
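
The `= {}` change above is a clean-code conformance fix rather than a behavioral one: `mindspore::HashSet` default-constructs to an empty set either way, and the explicit empty-brace initializer simply makes the initialization visible at the declaration, which is what static checkers flag. A small illustration, using `std::unordered_set` as a stand-in for `mindspore::HashSet`:

#include <cstddef>
#include <unordered_set>

int main() {
  // Explicitly initialized empty set; behaviorally identical to the
  // default-constructed declaration it replaces.
  std::unordered_set<std::size_t> input_to_attr = {};
  input_to_attr.insert(1);
  return input_to_attr.count(1) == 1 ? 0 : 1;
}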

View File

@@ -269,7 +269,7 @@ void AddAscendIRFusionPass(PassManager *ir_fusion_pm) {
ir_fusion_pm->AddPass(std::make_shared<BNReduceGradConv2dBackpropFilterFusion>());
ir_fusion_pm->AddPass(std::make_shared<SoftmaxDropoutDoMaskV3Fusion>());
const auto &pass_creators =
-opt::Factory<PatternProcessPass>::Instance().GetPassCreatorsByType(kPassType::kIRFusionFisionPass);
+opt::Factory<PatternProcessPass>::Instance().GetPassCreatorsByType(kPassType::kIRFusionFissionPass);
for (const auto &pass_creator : pass_creators) {
ir_fusion_pm->AddPass(pass_creator.second());
}
@@ -464,7 +464,7 @@ void RunOpAscendBackendIRFusionOptimization(const std::shared_ptr<session::Kerne
ir_fusion_pm->AddPass(std::make_shared<RenormSplit>());
ir_fusion_pm->AddPass(std::make_shared<EraseVisitAttr>());
const auto &pass_creators =
-opt::Factory<PatternProcessPass>::Instance().GetPassCreatorsByType(kPassType::kIRFusionFisionPass);
+opt::Factory<PatternProcessPass>::Instance().GetPassCreatorsByType(kPassType::kIRFusionFissionPass);
for (const auto &pass_creator : pass_creators) {
ir_fusion_pm->AddPass(pass_creator.second());
}

View File

@@ -209,5 +209,5 @@ const AnfNodePtr TopKSplit::Process(const FuncGraphPtr &func_graph, const AnfNod
return new_cnode;
}
-MS_PASS_FACTORY_REG(PatternProcessPass, topk_split_fission, TopKSplit, kIRFusionFisionPass);
+MS_PASS_FACTORY_REG(PatternProcessPass, topk_split_fission, TopKSplit, kIRFusionFissionPass);
} // namespace mindspore::opt

View File

@@ -30,7 +30,7 @@
namespace mindspore::opt {
enum kPassType {
kMindIRPass = 0,
-kIRFusionFisionPass,
+kIRFusionFissionPass,
kUBFusionPass,
};
@@ -43,13 +43,13 @@ class Factory {
void operator=(const Factory &) = delete;
static Factory &Instance() {
-static Factory instance;
+static Factory instance{};
return instance;
}
void Register(kPassType pass_type, const std::string &name, CreatorFunc &&creator) {
if (IsRegistered(pass_type, name)) {
-MS_LOG(ERROR) << "Pass " << name << " is already registered!";
+MS_LOG(WARNING) << "Pass " << name << " is already registered!";
}
(void)pass_creators_[pass_type].emplace(name, creator);
}
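
The last file ties the earlier hunks together: the enum value `kIRFusionFisionPass` is renamed to the correctly spelled `kIRFusionFissionPass` (matching the two call sites and the `MS_PASS_FACTORY_REG` registration above), the function-local singleton gains an explicit `{}` value-initializer, and duplicate registration is downgraded from ERROR to WARNING so a repeated pass name is reported but tolerated. A condensed, self-contained sketch of the same factory shape (types simplified; not the actual MindSpore `Factory` template):

#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <string>

enum class PassType { kMindIRPass = 0, kIRFusionFissionPass, kUBFusionPass };

struct Pass {
  virtual ~Pass() = default;
};

class PassFactory {
 public:
  using Creator = std::function<std::shared_ptr<Pass>()>;

  PassFactory(const PassFactory &) = delete;
  void operator=(const PassFactory &) = delete;

  static PassFactory &Instance() {
    static PassFactory instance{};  // value-initialized Meyers singleton
    return instance;
  }

  void Register(PassType type, const std::string &name, Creator creator) {
    auto &by_name = creators_[type];
    if (by_name.count(name) != 0) {
      // Warn but keep going, mirroring the ERROR -> WARNING downgrade above.
      std::cerr << "Pass " << name << " is already registered!\n";
    }
    (void)by_name.emplace(name, std::move(creator));
  }

 private:
  PassFactory() = default;
  std::map<PassType, std::map<std::string, Creator>> creators_;
};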