!31549 Clean code
Merge pull request !31549 from tanghuikang/clean_code
Commit 7a8edeea50
@@ -54,7 +54,7 @@ Status SomasSolverPre::AddContiguousInfoInMap(const vector<vector<size_t>> &cont
   for (size_t i = 0; i < aux.size() - 1; i++) {
     auto index1 = aux[i];
     auto index2 = aux[i + 1];
-    if (CheckTensors(pTensors, index1, index2) == FAILED) {
+    if (CheckTensors(pTensors, SizeToUint(index1), SizeToUint(index2)) == FAILED) {
       return FAILED;
     }
     tensors[index1]->right_ = tensors[index2];
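The point of this hunk is to make the size_t-to-uint32_t narrowing explicit and checked rather than implicit. A minimal sketch of the idea, assuming the real SizeToUint in MindSpore's convert utilities behaves as a checked narrowing cast (the name SizeToUintSketch and the throw are illustrative, not the project's actual error handling):

#include <cstddef>
#include <cstdint>
#include <limits>
#include <stdexcept>

// Hypothetical stand-in for SizeToUint: narrow std::size_t to uint32_t,
// failing loudly if the value would not fit instead of silently truncating.
inline uint32_t SizeToUintSketch(std::size_t v) {
  if (v > static_cast<std::size_t>(std::numeric_limits<uint32_t>::max())) {
    throw std::out_of_range("size_t value does not fit in uint32_t");
  }
  return static_cast<uint32_t>(v);
}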
@@ -26,7 +26,6 @@
 #include "cxx_api/graph/graph_data.h"
-
 namespace mindspore::session {

 void MultiGraphAclSession::Init(uint32_t device_id) { InitExecutor(kDavinciMultiGraphInferenceDevice, device_id); }

 GraphId MultiGraphAclSession::CompileGraphImpl(const AnfNodePtrList &lst, const AnfNodePtrList &outputs) {
@@ -38,7 +38,10 @@ EventRecordTask::EventRecordTask(const ModelContext &model_context,
   event_ = event_list[event_id];
 }

-EventRecordTask::~EventRecordTask() {}
+EventRecordTask::~EventRecordTask() {
+  stream_ = nullptr;
+  event_ = nullptr;
+}

 void EventRecordTask::Distribute() {
   MS_LOG(INFO) << "EventRecordTask Distribute start, stream: " << stream_ << ", event: " << event_
@@ -51,6 +51,8 @@ LabelGotoTask::~LabelGotoTask() {
     }
     index_value_ = nullptr;
   }
+  stream_ = nullptr;
+  rt_model_handle_ = nullptr;
 }

 void LabelGotoTask::Distribute() {
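The two destructor hunks above follow one pattern: raw members the task does not own are reset to nullptr on destruction, so the pointers' lifetime ends explicitly and any later stale access fails fast. A minimal sketch of the pattern, with made-up class and member names rather than the real task classes:

// Illustrative only: a task holds non-owning raw pointers into resources
// owned by the runtime model; the destructor clears them instead of
// freeing them, since ownership stays with the runtime.
class RecordTaskSketch {
 public:
  ~RecordTaskSketch() {
    stream_ = nullptr;  // not freed here: the runtime owns the stream
    event_ = nullptr;   // likewise for the event
  }

 private:
  void *stream_ = nullptr;
  void *event_ = nullptr;
};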
@@ -562,13 +562,13 @@ void TbeKernelSelect::PrintSupportedFormat(const SupportFormat &support_format)
     auto output_items = support_format.output_format.at(i);
     std::string print_str = "[";
     for (const auto &input : input_items) {
-      print_str.append(input);
-      print_str.append(", ");
+      (void)print_str.append(input);
+      (void)print_str.append(", ");
     }
-    print_str.append("] -->");
+    (void)print_str.append("] -->");
     for (const auto &output : output_items) {
-      print_str.append(output);
-      print_str.append(", ");
+      (void)print_str.append(output);
+      (void)print_str.append(", ");
     }
     MS_LOG(INFO) << "Support format: " << print_str;
   }
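This hunk, and the vector::insert, emplace_back, and manager->Replace hunks below, apply the same rule: when a call's return value is deliberately unused, cast it to void so static checkers treat the discard as intentional. std::string::append returns *this to allow chaining, which is why these calls trigger "return value unused" warnings in the first place. A small self-contained illustration (function names here are made up):

#include <string>

// std::string::append returns *this so calls can be chained; when the
// result is intentionally ignored, an explicit (void) cast documents the
// discard and silences return-value-unused checker warnings.
void AppendField(std::string *out, const std::string &piece) {
  (void)out->append(piece);
  (void)out->append(", ");
}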
@@ -221,10 +221,6 @@ void TbeUtils::UpdateCache(const std::string &kernel_name) {
 KernelPackPtr TbeUtils::SearchCache(const std::string &kernel_name, const bool is_akg) {
   // search cache.
   KernelMeta *bin_map = KernelMeta::GetInstance();
-  if (bin_map == nullptr) {
-    MS_LOG(INFO) << "kernel cache is invalid.";
-    return nullptr;
-  }
   return bin_map->GetKernelPack(kernel_name, is_akg);
 }

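Dropping the null check is sound only if KernelMeta::GetInstance can never return nullptr; that is an assumption here, but it is the standard justification for this kind of dead-check removal when the accessor follows the usual singleton shape. A sketch of such an accessor (KernelMetaSketch is a stand-in, not the real class):

// Assumed shape of the singleton accessor: a function-local static is
// constructed on first use and its address is always valid, so callers
// never observe nullptr and the removed check was dead code.
class KernelMetaSketch {
 public:
  static KernelMetaSketch *GetInstance() {
    static KernelMetaSketch instance;
    return &instance;
  }
};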
@@ -29,7 +29,7 @@ namespace mindspore {
 namespace opt {
 class BatchMatmulFusedMulAddFusionPass : public FusionBasePass {
  public:
-  explicit BatchMatmulFusedMulAddFusionPass(FusionIdAllocatorPtr idAllocator)
+  explicit BatchMatmulFusedMulAddFusionPass(const FusionIdAllocatorPtr &idAllocator)
       : FusionBasePass("BatchMatmulFusedMulAddFusionPass", idAllocator) {
     PassSwitchManager::GetInstance().RegistLicPass(name(), OptPassEnum::BatchMatmulFusedMulAddFusionPass);
   }
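FusionIdAllocatorPtr is presumably a std::shared_ptr alias; taking it by const reference skips the pointer copy and the atomic reference-count increment that a by-value parameter incurs, which matters for constructors called in a hot pass-registration path. A small illustration (the Allocator type and function names are made up):

#include <memory>

struct Allocator {};  // hypothetical payload type
using AllocatorPtr = std::shared_ptr<Allocator>;

// By value: copies the shared_ptr, bumping the atomic refcount on entry
// and dropping it on exit.
void UseByValue(AllocatorPtr a) { (void)a; }

// By const reference: no copy and no refcount traffic; fine whenever the
// callee only reads the pointer and does not need to extend its lifetime.
void UseByConstRef(const AllocatorPtr &a) { (void)a; }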
@@ -35,7 +35,7 @@ AnfNodePtr BCEWithLogitsLossFission::AddReduceNode(const FuncGraphPtr &func_grap
   // Copy a new sigmoid node, shape of output is the same as input
   std::vector<AnfNodePtr> new_simoid_inputs = {
       NewValueNode(std::make_shared<Primitive>(prim::kPrimBCEWithLogitsLoss->name()))};
-  new_simoid_inputs.insert(new_simoid_inputs.end(), cnode->inputs().begin() + 1, cnode->inputs().end());
+  (void)new_simoid_inputs.insert(new_simoid_inputs.end(), cnode->inputs().begin() + 1, cnode->inputs().end());
   CNodePtr new_cnode = NewCNode(new_simoid_inputs, func_graph);
   MS_EXCEPTION_IF_NULL(new_cnode);
   auto predict_input = cnode->inputs()[kIndex1];
@@ -29,10 +29,10 @@ CNodePtr AddCastNode(const FuncGraphPtr &func_graph, const TypeId dst_type, cons
   std::vector<AnfNodePtr> new_cast_inputs = {NewValueNode(std::make_shared<Primitive>(prim::kPrimCast->name()))};
   BaseShapePtr shape;
   if (fir_flag) {
-    new_cast_inputs.emplace_back(input_node->inputs()[kIndex1]);
+    (void)new_cast_inputs.emplace_back(input_node->inputs()[kIndex1]);
     shape = common::AnfAlgo::GetOutputDetailShape(input_node->inputs()[kIndex1], 0);
   } else {
-    new_cast_inputs.emplace_back(input_node);
+    (void)new_cast_inputs.emplace_back(input_node);
     shape = common::AnfAlgo::GetOutputDetailShape(input_node, 0);
   }
   CNodePtr new_cast = NewCNode(new_cast_inputs, func_graph);
@@ -281,7 +281,7 @@ CNodePtr CreateExpandDimsPynative(const FuncGraphPtr &graph, const CNodePtr &rea

   expand_dims_node->set_scope(real_div_node->scope());
   std::vector<size_t> y_shape = common::AnfAlgo::GetOutputInferShape(real_div_node, 0);
-  y_shape.emplace_back(1);
+  (void)y_shape.emplace_back(1);
   if (AnfUtils::IsShapeDynamic(y_shape)) {
     auto min_shape = common::AnfAlgo::GetOutputMinShape(real_div_node, 0);
     auto max_shape = common::AnfAlgo::GetOutputMaxShape(real_div_node, 0);
@@ -552,8 +552,8 @@ const AnfNodePtr GradSparseSoftmaxCrossEntropyWithLogitsUnifyMindIR::Process(con

   auto manager = graph->manager();
   MS_EXCEPTION_IF_NULL(manager);
-  manager->Replace(sparse_softmax_node, reduce_node);
-  manager->Replace(mul_node, new_mul_node);
+  (void)manager->Replace(sparse_softmax_node, reduce_node);
+  (void)manager->Replace(mul_node, new_mul_node);
   std::vector<AnfNodePtr> inputs = {NewValueNode(std::make_shared<Primitive>(prim::kPrimDepend->name())),
                                     NewValueNode(MakeValue<bool>(true)), NewValueNode(MakeValue<bool>(true))};
   auto new_depend = graph->NewCNode(inputs);