Merge pull request !31494 from kisnwang/clean_code
i-robot 2022-03-19 03:00:33 +00:00 committed by Gitee
commit 454f124ac5
No known key found for this signature in database
GPG Key ID: 173E9B9CA92EEF8F
21 changed files with 28 additions and 40 deletions

View File

@@ -31,11 +31,10 @@ const AnfNodePtr CustomOpConstInputToAttr::Process(const FuncGraphPtr &, const A
auto cnode = node->cast<CNodePtr>();
MS_EXCEPTION_IF_NULL(cnode);
// Only process Custom operator.
if (!IsPrimitiveCNode(cnode, prim::kPrimCustom)) {
return nullptr;
}
auto primitive = common::AnfAlgo::GetCNodePrimitive(cnode);
MS_EXCEPTION_IF_NULL(primitive);
mindspore::HashSet<size_t> attr_indices;

View File

@@ -123,14 +123,12 @@ const AnfNodePtr CustomOpRegInfoToAttr::Process(const FuncGraphPtr &, const AnfN
if (node == nullptr || !AnfUtils::IsRealCNodeKernel(node)) {
return nullptr;
}
auto cnode = node->cast<CNodePtr>();
MS_EXCEPTION_IF_NULL(cnode);
// Only process Custom operator.
if (!IsPrimitiveCNode(cnode, prim::kPrimCustom)) {
return nullptr;
}
auto primitive = common::AnfAlgo::GetCNodePrimitive(cnode);
MS_EXCEPTION_IF_NULL(primitive);
auto func_type = common::AnfAlgo::GetNodeAttr<std::string>(cnode, kAttrFuncType);
@@ -148,7 +146,7 @@ const AnfNodePtr CustomOpRegInfoToAttr::Process(const FuncGraphPtr &, const AnfN
auto attr_names_vec = GetValue<std::vector<std::string>>(attr_names);
for (const auto &name : attr_names_vec) {
if (!primitive->HasAttr(name)) {
- missing_attrs.insert(name);
+ (void)missing_attrs.insert(name);
}
}
if (missing_attrs.empty()) {
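The added (void) cast follows from std::set::insert returning a std::pair<iterator, bool>; discarding it explicitly documents intent and satisfies lint rules about ignored return values. A minimal standalone sketch of the pattern (names are illustrative, not from this codebase):

#include <set>
#include <string>

void CollectMissing(const std::set<std::string> &required, const std::set<std::string> &present,
                    std::set<std::string> *missing) {
  for (const auto &name : required) {
    if (present.count(name) == 0) {
      // insert() returns std::pair<iterator, bool>; cast to void to mark the
      // discarded result as intentional.
      (void)missing->insert(name);
    }
  }
}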

View File

@@ -25,7 +25,7 @@ namespace mindspore {
namespace somas {
class SomasParameter {
public:
- SomasParameter(size_t id, const std::string source_node_name, size_t index, const void *addr, size_t size)
+ SomasParameter(size_t id, const std::string &source_node_name, size_t index, const void *addr, size_t size)
: id_(id),
source_node_name_(source_node_name),
output_index_(index),
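Passing std::string by value copies the argument on every construction; const std::string & binds to the caller's object without a copy, while the member initializer still makes the one copy the object actually needs. A minimal sketch of the idiom (class and members are illustrative):

#include <cstddef>
#include <string>

class Parameter {
 public:
  // const& avoids a temporary copy at the call site; the member initializer
  // below performs the single copy the object actually needs.
  Parameter(std::size_t id, const std::string &source_node_name) : id_(id), source_node_name_(source_node_name) {}

 private:
  std::size_t id_;
  std::string source_node_name_;
};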

View File

@@ -17,6 +17,7 @@
#include "kernel/environ_manager.h"
#include "utils/ms_utils.h"
#include "utils/log_adapter.h"
+ #include "include/common/utils/utils.h"
namespace mindspore {
namespace kernel {
@@ -89,8 +90,8 @@ bool EnvironMgr::CheckEnvInput(const CNodePtr &kernel_node) const {
}
// Check the input value.
- auto value_type = AnfAlgo::GetInputDeviceDataType(kernel_node, 2);
- auto value_shapes = AnfAlgo::GetInputDeviceShape(kernel_node, 2);
+ auto value_type = AnfAlgo::GetInputDeviceDataType(kernel_node, kIndex2);
+ auto value_shapes = AnfAlgo::GetInputDeviceShape(kernel_node, kIndex2);
if ((value_type_attr == kObjectTypeEnvType) && (!IsScalarTensor(value_type, value_shapes))) {
MS_LOG(ERROR) << "The input value checks invalid, kernel: " << kernel_node->fullname_with_scope();
return false;
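kIndex2 comes from the newly included utils header and replaces the bare literal 2 naming the input slot. A hedged sketch of why a named index reads better (the helper and the constant's definition here are illustrative):

#include <cstddef>
#include <vector>

constexpr std::size_t kIndex2 = 2;  // slot of the environ value input

int EnvironValueInput(const std::vector<int> &inputs) {
  // A named constant gives the slot a single point of definition
  // instead of a magic number repeated across call sites.
  return inputs.at(kIndex2);
}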

View File

@@ -101,7 +101,7 @@ bool KernelPack::ReadFromJsonFile(const std::string &json_f, const std::string &
(void)kernel_json.read(json_->contents, SizeToLong(json_->len));
if (processor == kProcessorCpu) {
- std::string bin_f = json_f.substr(0, json_f.length() - 5) + ".so";
+ std::string bin_f = json_f.substr(0, json_f.length() - kJsonSuffixLength) + ".so";
if (!CheckHash(json_f, bin_f, js)) {
return false;
}
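The literal 5 was the length of the ".json" suffix being stripped; naming it makes the arithmetic self-describing. A sketch under that assumption (the helper function is illustrative):

#include <cstddef>
#include <string>

constexpr std::size_t kJsonSuffixLength = 5;  // length of ".json"

// Replace the ".json" suffix with ".so" to locate the binary beside its JSON descriptor.
std::string JsonToBinaryPath(const std::string &json_f) {
  return json_f.substr(0, json_f.length() - kJsonSuffixLength) + ".so";
}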

View File

@@ -694,7 +694,7 @@ std::string AscendKernelRuntime::GetDumpPath() {
}
#ifndef ENABLE_SECURITY
- void AscendKernelRuntime::DumpTaskExceptionInfo(const session::KernelGraph & /* graph */) {
+ void AscendKernelRuntime::DumpTaskExceptionInfo(const session::KernelGraph &) {
const std::string path = GetDumpPath();
if (access(path.c_str(), F_OK) == 0) {
if (!DeleteDumpDir(path)) {
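Dropping the commented-out name relies on C++ allowing parameters to be unnamed; an unnamed parameter silences unused-parameter warnings and signals that the function deliberately ignores the argument. A minimal sketch (types are illustrative):

struct Handler {
  virtual ~Handler() = default;
  virtual void OnGraphDump(int graph_id) = 0;
};

struct NoopHandler : Handler {
  // The interface requires the parameter, but this override ignores it;
  // leaving it unnamed documents that without a /* graph_id */ comment.
  void OnGraphDump(int) override {}
};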

View File

@@ -420,7 +420,6 @@ void AscendDeviceContext::PreprocessBeforeRunGraph(const KernelGraphPtr &graph)
MS_LOG(EXCEPTION) << "Preprocess failed before run graph " << graph->graph_id() << ", \nerror msg: " << e.what();
}
- // TODO(dsj): for ms_function running in graph_mode. should be delete later
const std::vector<CNodePtr> &kernels = graph->execution_order();
for (const auto &kernel : kernels) {
common::AnfAlgo::SetNodeAttr(kAttrMSFunction, MakeValue(true), kernel);
@@ -830,7 +829,6 @@ bool AscendDeviceContext::LaunchKernel(const CNodePtr &kernel, const vector<Addr
auto ms_context = MsContext::GetInstance();
MS_EXCEPTION_IF_NULL(ms_context);
- // TODO(dsj): for ms_function running in graph_mode. should be delete later
if (!is_dynamic_shape || !(common::AnfAlgo::GetBooleanAttr(kernel, kAttrMSFunction))) {
std::lock_guard<std::mutex> locker(launch_mutex_);
// launch atomic clean
@@ -845,7 +843,6 @@ bool AscendDeviceContext::LaunchKernel(const CNodePtr &kernel, const vector<Addr
MemoryCopyAsync(kernel, real_inputs, outputs);
} else {
MS_LOG(DEBUG) << "Launch kernel " << kernel->fullname_with_scope();
- // TODO(dsj): for ms_function running in graph_mode. should be delete later
if (is_dynamic_shape && !(common::AnfAlgo::GetBooleanAttr(kernel, kAttrMSFunction))) {
ret = kernel_mod->Launch(real_inputs, workspace, outputs, GetKernelStream(kernel));
if (!ret) {

View File

@@ -122,7 +122,6 @@ void AscendGraphOptimization::OptimizeExecutionOrder(const KernelGraphPtr &graph
}
#endif
- // TODO(sida): do not hide nop op in kernel_by_kernel mode
if (graph->is_executing_sink()) {
opt::HideNopNode(graph.get());
}

View File

@@ -43,8 +43,8 @@ bool LabelSetKernel::Init(const AnfNodePtr &anf_node) {
return true;
}
- bool LabelSetKernel::Launch(const std::vector<AddressPtr> & /*inputs*/, const std::vector<AddressPtr> & /*workspace*/,
- const std::vector<AddressPtr> & /*outputs*/, void * /*stream_ptr*/) {
+ bool LabelSetKernel::Launch(const std::vector<AddressPtr> &, const std::vector<AddressPtr> &,
+ const std::vector<AddressPtr> &, void *) {
MS_LOG(INFO) << "LabelSetKernel launch";
return true;
}

View File

@@ -38,7 +38,7 @@ MemCpyAsyncKernel::MemCpyAsyncKernel() {}
MemCpyAsyncKernel::~MemCpyAsyncKernel() {}
- bool MemCpyAsyncKernel::Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> & /* workspace */,
+ bool MemCpyAsyncKernel::Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
const std::vector<AddressPtr> &outputs, void *stream_ptr) {
if (inputs.size() != 1) {
MS_LOG(ERROR) << "inputs size is not one";

View File

@@ -36,7 +36,7 @@ TensorCopySlices::TensorCopySlices() {}
TensorCopySlices::~TensorCopySlices() {}
- bool TensorCopySlices::Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> & /*workspace*/,
+ bool TensorCopySlices::Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
const std::vector<AddressPtr> &outputs, void *stream_ptr) {
if (inputs.size() != 2) {
MS_LOG(ERROR) << "inputs size is not 2";

View File

@@ -260,12 +260,12 @@ bool DynamicTbeKernelMod::Launch(const std::vector<AddressPtr> &inputs, const st
// pack all addresses into a vector.
std::vector<void *> runtimeargs;
(void)std::transform(std::begin(inputs), std::end(inputs), std::back_inserter(runtimeargs),
- [](const AddressPtr &input) -> void * { return input->addr; });
+ [](const AddressPtr &input) { return input->addr; });
(void)std::transform(std::begin(outputs), std::end(outputs), std::back_inserter(runtimeargs),
- [](const AddressPtr &output) -> void * { return output->addr; });
+ [](const AddressPtr &output) { return output->addr; });
if (!workspace.empty()) {
(void)std::transform(std::begin(workspace), std::end(workspace), std::back_inserter(runtimeargs),
- [](const AddressPtr &addr) -> void * { return addr->addr; });
+ [](const AddressPtr &addr) { return addr->addr; });
}
if (!tiling_data_.empty() && tiling_data_ptr_ != nullptr) {
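The trailing -> void * return types are dropped because each lambda body returns input->addr, so void * is exactly what deduction produces anyway. A compilable sketch of the same pattern, with AddressPtr stubbed for illustration:

#include <algorithm>
#include <iterator>
#include <memory>
#include <vector>

struct Address {
  void *addr = nullptr;
};
using AddressPtr = std::shared_ptr<Address>;

std::vector<void *> PackAddresses(const std::vector<AddressPtr> &inputs) {
  std::vector<void *> runtimeargs;
  // The lambda's deduced return type is already void *, so the explicit
  // trailing return type was redundant; the transform result is discarded.
  (void)std::transform(std::begin(inputs), std::end(inputs), std::back_inserter(runtimeargs),
                       [](const AddressPtr &input) { return input->addr; });
  return runtimeargs;
}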

View File

@@ -303,8 +303,6 @@ std::vector<size_t> FusionBuildTbeJsonCreator::GetDescOutputIndex(const std::vec
bool FusionBuildTbeJsonCreator::AttrsJsonPostProcessing(const AnfNodePtr &anf_node, const OpInfoPtr &op_info_ptr,
nlohmann::json *attrs_json) {
- // just keep it
- // tbe::TbeAdapter::CastAttrJsonPost(anf_node, attrs_json);
return true;
}

View File

@@ -23,8 +23,7 @@
namespace mindspore {
namespace opt {
- void BatchMatmulFusedMulAddFusionPass::MatchBatchMatmulFusedMulAdd(const CNodePtr &cnode,
- const session::KernelGraph & /* kernel_graph */,
+ void BatchMatmulFusedMulAddFusionPass::MatchBatchMatmulFusedMulAdd(const CNodePtr &cnode, const session::KernelGraph &,
FusedNodeRecord *candidate_fusion) {
MS_EXCEPTION_IF_NULL(cnode);
MS_EXCEPTION_IF_NULL(candidate_fusion);

View File

@@ -29,7 +29,7 @@ namespace mindspore {
namespace opt {
class BnupdateEltwiseEltwiseFusionPass : public FusionBasePass {
public:
- explicit BnupdateEltwiseEltwiseFusionPass(FusionIdAllocatorPtr idAllocator)
+ explicit BnupdateEltwiseEltwiseFusionPass(const FusionIdAllocatorPtr &idAllocator)
: FusionBasePass("BnupdateEltwiseEltwiseFusionPass", idAllocator) {
PassSwitchManager::GetInstance().RegistLicPass(name(), OptPassEnum::BnupdateEltwiseEltwiseFusionPass);
}
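FusionIdAllocatorPtr is presumably a std::shared_ptr alias; taking it by value costs an atomic reference-count increment and release at every call, which a const reference avoids. A sketch with the pointer type stubbed for illustration:

#include <memory>

struct FusionIdAllocator {};
using FusionIdAllocatorPtr = std::shared_ptr<FusionIdAllocator>;

class FusionPass {
 public:
  // const& skips the atomic ref-count round trip at the call boundary;
  // the member copy below still takes shared ownership exactly once.
  explicit FusionPass(const FusionIdAllocatorPtr &idAllocator) : idAllocator_(idAllocator) {}

 private:
  FusionIdAllocatorPtr idAllocator_;
};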

View File

@@ -24,7 +24,7 @@
namespace mindspore {
namespace opt {
void Conv2DBackpropEltwiseFusionPass::MatchConv2DBackpropInputEltwise(const CNodePtr &cnode,
- const session::KernelGraph & /* kernel_graph */,
+ const session::KernelGraph &,
FusedNodeRecord *candidate_fusion) {
MS_EXCEPTION_IF_NULL(cnode);
MS_EXCEPTION_IF_NULL(candidate_fusion);

View File

@@ -24,8 +24,7 @@
namespace mindspore {
namespace opt {
- void DepthwiseConvEltwiseFusionPass::MatchDepthwiseConvRelu(const CNodePtr &cnode,
- const session::KernelGraph & /* kernel_graph */,
+ void DepthwiseConvEltwiseFusionPass::MatchDepthwiseConvRelu(const CNodePtr &cnode, const session::KernelGraph &,
FusedNodeRecord *candidate_fusion, bool is_order) {
MS_EXCEPTION_IF_NULL(cnode);
MS_EXCEPTION_IF_NULL(candidate_fusion);

View File

@@ -28,7 +28,7 @@ constexpr auto kAttrTransposeX2 = "transpose_x2";
} // namespace
void MatmulConfusionTranposeFusionPass::MatchMatmulConfusionTranpose(const CNodePtr &cnode,
- const session::KernelGraph & /* kernel_graph */,
+ const session::KernelGraph &,
FusedNodeRecord *candidate_fusion) {
MS_EXCEPTION_IF_NULL(cnode);
MS_EXCEPTION_IF_NULL(candidate_fusion);

View File

@@ -564,11 +564,9 @@ AnfNodePtr DynamicRnnGradFissionV2::CreateConcatNodeT1(const FuncGraphPtr &func_
auto reshape_in0 = NewCNode(reshape_inputs, func_graph);
std::vector<size_t> shape = {origin_input0_shape[kDim0] * origin_input0_shape[kDim1], origin_input0_shape[kDim2]};
common::AnfAlgo::SetOutputInferTypeAndShape({origin_input0_dtype}, {shape}, reshape_in0.get());
- // t_size * batch_size, input_size (t_size = 1)
- concat_inputs.push_back(reshape_in0);
+ (void)concat_inputs.emplace_back(reshape_in0);
} else {
- // t_size, batch_size, input_size (t_size = 1)
- concat_inputs.push_back(origin_input0);
+ (void)concat_inputs.emplace_back(origin_input0);
}
auto origin_input4 = dynamic_rnn_grad_cnode->input(kIndex5);

View File

@@ -36,10 +36,10 @@ AnfNodePtr MatmulBiasaddFusion::CreateMatmulWithBias(const FuncGraphPtr &graph,
}
std::vector<AnfNodePtr> inputs;
- inputs.emplace_back(NewValueNode(std::make_shared<Primitive>(prim::kPrimMatMul->name())));
- inputs.emplace_back(GetAnfNodeByVar(equiv, x0_));
- inputs.emplace_back(GetAnfNodeByVar(equiv, x1_));
- inputs.emplace_back(GetAnfNodeByVar(equiv, x2_));
+ (void)inputs.emplace_back(NewValueNode(std::make_shared<Primitive>(prim::kPrimMatMul->name())));
+ (void)inputs.emplace_back(GetAnfNodeByVar(equiv, x0_));
+ (void)inputs.emplace_back(GetAnfNodeByVar(equiv, x1_));
+ (void)inputs.emplace_back(GetAnfNodeByVar(equiv, x2_));
auto new_node = NewCNode(inputs, graph);
MS_EXCEPTION_IF_NULL(new_node);
new_node->set_scope(node->scope());
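Since C++17, std::vector::emplace_back returns a reference to the inserted element, so these calls pick up the same (void) discard marker as insert. A minimal sketch (vector contents are illustrative):

#include <string>
#include <vector>

std::vector<std::string> BuildInputs() {
  std::vector<std::string> inputs;
  // emplace_back returns the new element by reference in C++17;
  // (void) marks the unused return value as deliberately discarded.
  (void)inputs.emplace_back("MatMul");
  (void)inputs.emplace_back("x0");
  (void)inputs.emplace_back("bias");
  return inputs;
}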

View File

@@ -180,7 +180,7 @@ CNodePtr AllToAllUnifyMindIR::CreateConcatNode(const FuncGraphPtr &graph, const
max_shape[LongToSize(concat_dim)] *= split_count;
min_shape[LongToSize(concat_dim)] *= split_count;
ShapeVector new_shape;
- std::transform(single_shape.begin(), single_shape.end(), std::back_inserter(new_shape), SizeToLong);
+ (void)std::transform(single_shape.begin(), single_shape.end(), std::back_inserter(new_shape), SizeToLong);
common::AnfAlgo::SetOutputTypeAndDetailShape({common::AnfAlgo::GetOutputInferDataType(all_to_all_v_outputs[0], 0)},
{std::make_shared<abstract::Shape>(new_shape, min_shape, max_shape)},
concat.get());