forked from mindspore-Ecosystem/mindspore

clean code

parent ae19b79f00
commit bab79c39ce
@@ -1911,7 +1911,7 @@ FunctionBlockPtr Parser::ParseWhile(const FunctionBlockPtr &block, const py::obj
  AnfNodePtr while_condition_node = header_block->ForceToWhileCond(condition_node);
  UpdateInterpretForUserNode(while_condition_node, condition_node);
  while_condition_node = HandleInterpret(header_block, while_condition_node, test_node);
-  header_block->ConditionalJump(while_condition_node, body_block, after_block);
+  (void)header_block->ConditionalJump(while_condition_node, body_block, after_block);

  body_block->Mature();
  // Parse loop body statements with loop context.
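The ConditionalJump call above now has its return value explicitly discarded. A minimal sketch of that pattern with hypothetical names (nothing here is a MindSpore API): casting an ignored result to void records that the discard is intentional and keeps nodiscard-style warnings quiet.

  #include <vector>

  // Hypothetical helper whose result a caller may legitimately ignore.
  [[nodiscard]] bool AppendIfNew(std::vector<int> *out, int v) {
    for (int x : *out) {
      if (x == v) return false;  // already present, nothing inserted
    }
    out->push_back(v);
    return true;  // inserted
  }

  void Demo(std::vector<int> *out) {
    // Without the cast, discarding a [[nodiscard]] result draws a compiler warning.
    (void)AppendIfNew(out, 42);
  }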
@@ -2617,7 +2617,7 @@ void Parser::UpdateInterpretForUserNode(const AnfNodePtr &user_node, const AnfNo
  }
}

-void Parser::UpdateInterpretForUserNode(const AnfNodePtr &user_node, const std::vector<AnfNodePtr> &nodes) {
+void Parser::UpdateInterpretForUserNode(const AnfNodePtr &user_node, const std::vector<AnfNodePtr> &nodes) const {
  for (auto &node : nodes) {
    UpdateInterpretForUserNode(user_node, node);
  }
@@ -2663,7 +2663,7 @@ bool Parser::IsTensorType(const AnfNodePtr &node, const std::string &script_text
}

bool Parser::CheckNeedConvertInterpret(const FunctionBlockPtr &block, const AnfNodePtr &node,
-                                       const string &script_text) {
+                                       const string &script_text) const {
  MS_EXCEPTION_IF_NULL(block);
  MS_EXCEPTION_IF_NULL(node);
  // If the Tensor is present as type, should not convert Interpret node.
@@ -234,7 +234,7 @@ class Parser {
                          const std::map<std::string, AnfNodePtr> &local_keys, const FuncGraphPtr &func_graph) const;
  // Set the interpret flag for the node calling the interpret node.
  void UpdateInterpretForUserNode(const AnfNodePtr &user_node, const AnfNodePtr &node) const;
-  void UpdateInterpretForUserNode(const AnfNodePtr &user_node, const std::vector<AnfNodePtr> &nodes);
+  void UpdateInterpretForUserNode(const AnfNodePtr &user_node, const std::vector<AnfNodePtr> &nodes) const;
  // Make interpret node.
  AnfNodePtr MakeInterpretNode(const FunctionBlockPtr &block, const AnfNodePtr &value_node, const string &script_text);
  // Convert interpret iter node to list.
@@ -243,7 +243,8 @@ class Parser {
  // Check if the node need interpreting.
  AnfNodePtr HandleInterpret(const FunctionBlockPtr &block, const AnfNodePtr &value_node,
                             const py::object &value_object);
-  bool CheckNeedConvertInterpret(const FunctionBlockPtr &block, const AnfNodePtr &node, const string &script_text);
+  bool CheckNeedConvertInterpret(const FunctionBlockPtr &block, const AnfNodePtr &node,
+                                 const string &script_text) const;
  // Handle interpret for augassign expression.
  AnfNodePtr HandleInterpretForAugassign(const FunctionBlockPtr &block, const AnfNodePtr &augassign_node,
                                         const py::object &op_object, const py::object &target_object,
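The Parser methods above (and the SideEffectFinder methods further down) gain a trailing const because they never modify the object they are called on. A small sketch with a hypothetical class, not the real Parser: the qualifier lets the method be called through const references and makes accidental mutation a compile error.

  #include <string>
  #include <vector>

  class InterpretTracker {
   public:
    // Read-only query: const, so it is callable on const objects and cannot touch members.
    bool Contains(const std::string &name) const {
      for (const auto &n : names_) {
        if (n == name) return true;
      }
      return false;
    }

    void Add(const std::string &name) { names_.push_back(name); }  // mutating, stays non-const

   private:
    std::vector<std::string> names_;
  };

  bool Query(const InterpretTracker &tracker) {  // const reference parameter
    return tracker.Contains("interpret");        // only const members are callable here
  }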
@@ -486,7 +486,7 @@ AnfNodePtr ResolveSequenceWithAttr(const FuncGraphManagerPtr &manager, const py:
    // Resolve Cell instances.
    for (size_t i = 0; i < sequence_size; ++i) {
      auto res = ResolveCellWithAttr(manager, sequence[i], resolve_node, attr, get_attr_node);
-      inputs.emplace_back(res);
+      (void)inputs.emplace_back(res);
    }
  } else if (count_msclass == sequence_size) {
    // Resolve MsClass instances.
@@ -18,6 +18,7 @@
#include <list>
#include <vector>
#include <stack>
#include <string>
#include <utility>
#include <memory>
#include <algorithm>
@@ -369,7 +370,7 @@ class SideEffectFinder {
  }

  // Gets branch graphs from a switch cnode.
-  std::vector<FuncGraphPtr> GetSwitchBranches(const CNodePtr &cnode) {
+  std::vector<FuncGraphPtr> GetSwitchBranches(const CNodePtr &cnode) const {
    MS_EXCEPTION_IF_NULL(cnode);
    constexpr size_t switch_cnode_size = 4;
    constexpr size_t true_index = 2;
@@ -1152,7 +1153,7 @@ class SideEffectFinder {
    }
  }

-  void AddMonadForCaller(const CNodePtr &caller, const EffectInfo &info) {
+  void AddMonadForCaller(const CNodePtr &caller, const EffectInfo &info) const {
    if (info.memory || info.load) {
      // Add u monad argument to caller if need.
      AddMonadArgument(caller, kUMonad);
@@ -17,8 +17,6 @@
#ifndef MINDSPORE_CCSRC_PIPELINE_JIT_PARSE_AUTO_MONAD_H_
#define MINDSPORE_CCSRC_PIPELINE_JIT_PARSE_AUTO_MONAD_H_

#include <string>

#include "ir/anf.h"
#include "ir/func_graph.h"
@@ -22,6 +22,7 @@
#include <algorithm>
#include <utility>
+#include <cfloat>
#include <cmath>

#include "ir/value.h"
#include "ir/tensor.h"
@@ -39,9 +40,9 @@ bool ValueToBool(const ValuePtr &v, bool *value) {
  } else if (v->isa<UInt32Imm>()) {
    *value = v->cast<UInt32ImmPtr>()->value() != 0;
  } else if (v->isa<FP32Imm>()) {
-    *value = v->cast<FP32ImmPtr>()->value() != 0;
+    *value = fabs(v->cast<FP32ImmPtr>()->value()) > FLT_EPSILON;
  } else if (v->isa<FP64Imm>()) {
-    *value = v->cast<FP64ImmPtr>()->value() != 0;
+    *value = fabs(v->cast<FP64ImmPtr>()->value()) > DBL_EPSILON;
  } else if (v->isa<tensor::Tensor>()) {
    auto tensor = v->cast<tensor::TensorPtr>();
    MS_EXCEPTION_IF_NULL(tensor);
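The float and double branches above switch from a direct `!= 0` test to an epsilon comparison. A stand-alone illustration with made-up values (not from the diff): rounding error can leave a mathematically-zero result slightly off zero, so a tolerance such as FLT_EPSILON or DBL_EPSILON from <cfloat> is used instead; note that magnitudes at or below the tolerance now count as zero, a small semantic shift from the exact test.

  #include <cfloat>
  #include <cmath>
  #include <cstdio>

  int main() {
    double x = 0.1 + 0.2 - 0.3;                  // mathematically zero, ~5.6e-17 in IEEE 754 doubles
    bool exact = (x != 0.0);                     // true: rounding noise counts as non-zero
    bool tolerant = std::fabs(x) > DBL_EPSILON;  // false: noise below the tolerance is treated as zero
    std::printf("exact=%d tolerant=%d\n", exact, tolerant);
    return 0;
  }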
@@ -74,47 +74,47 @@ py::object ScalarPtrToPyData(const ScalarPtr &value) {
    case kNumberTypeUInt8:
      MS_LOG(DEBUG) << "uint8";
      int_v = value->cast<UInt8ImmPtr>()->value();
-      return std::move(int_v);
+      return int_v;
    case kNumberTypeUInt16:
      MS_LOG(DEBUG) << "uint16";
      int_v = value->cast<UInt16ImmPtr>()->value();
-      return std::move(int_v);
+      return int_v;
    case kNumberTypeUInt32:
      MS_LOG(DEBUG) << "uint32";
      int_v = value->cast<UInt32ImmPtr>()->value();
-      return std::move(int_v);
+      return int_v;
    case kNumberTypeUInt64:
      MS_LOG(DEBUG) << "uint64";
      int_v = value->cast<UInt64ImmPtr>()->value();
-      return std::move(int_v);
+      return int_v;
    case kNumberTypeInt8:
      MS_LOG(DEBUG) << "int8";
      int_v = value->cast<Int8ImmPtr>()->value();
-      return std::move(int_v);
+      return int_v;
    case kNumberTypeInt16:
      MS_LOG(DEBUG) << "int16";
      int_v = value->cast<Int16ImmPtr>()->value();
-      return std::move(int_v);
+      return int_v;
    case kNumberTypeInt32:
      MS_LOG(DEBUG) << "int32";
      int_v = value->cast<Int32ImmPtr>()->value();
-      return std::move(int_v);
+      return int_v;
    case kNumberTypeInt64:
      MS_LOG(DEBUG) << "int64";
      int_v = value->cast<Int64ImmPtr>()->value();
-      return std::move(int_v);
+      return int_v;
    case kNumberTypeFloat32:
      MS_LOG(DEBUG) << "float";
      float_v = value->cast<FP32ImmPtr>()->value();
-      return std::move(float_v);
+      return float_v;
    case kNumberTypeFloat64:
      MS_LOG(DEBUG) << "double";
      float_v = value->cast<FP64ImmPtr>()->value();
-      return std::move(float_v);
+      return float_v;
    case kNumberTypeBool:
      MS_LOG(DEBUG) << "bool";
      bool_v = value->cast<BoolImmPtr>()->value();
-      return std::move(bool_v);
+      return bool_v;
    default:
      MS_EXCEPTION(TypeError) << "Unsupported scalar converted to py data: " << value->ToString();
  }
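Every `return std::move(...);` above becomes a plain return, in line with the general rule that std::move on a local in a return statement is unnecessary: the compiler already moves a returned local, and for same-type returns the explicit move only blocks copy elision (clang warns with -Wpessimizing-move). A minimal sketch with an ordinary standard type rather than the pybind11 objects used here:

  #include <utility>
  #include <vector>

  std::vector<int> BuildPessimized() {
    std::vector<int> v{1, 2, 3};
    return std::move(v);  // disables NRVO; the result must be move-constructed
  }

  std::vector<int> BuildPreferred() {
    std::vector<int> v{1, 2, 3};
    return v;  // NRVO usually elides the copy entirely; otherwise v is moved implicitly
  }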
@@ -355,19 +355,19 @@ py::object BuiltinsToPyData(const Any &value) {
  if (value.is<int>()) {
    MS_LOG(DEBUG) << "int";
    py::int_ ret = value.cast<int>();
-    return std::move(ret);
+    return ret;
  } else if (value.is<float>()) {
    MS_LOG(DEBUG) << "float";
    py::float_ ret = value.cast<float>();
-    return std::move(ret);
+    return ret;
  } else if (value.is<double>()) {
    MS_LOG(DEBUG) << "double";
    py::float_ ret = value.cast<double>();
-    return std::move(ret);
+    return ret;
  } else {
    MS_LOG(DEBUG) << "bool";
    py::bool_ ret = value.cast<bool>();
-    return std::move(ret);
+    return ret;
  }
}

@@ -375,19 +375,19 @@ py::object BuiltinsToPyData(const BaseRef &value) {
  if (utils::isa<int>(value)) {
    MS_LOG(DEBUG) << "int";
    py::int_ ret = utils::cast<int>(value);
-    return std::move(ret);
+    return ret;
  } else if (utils::isa<float>(value)) {
    MS_LOG(DEBUG) << "float";
    py::float_ ret = utils::cast<float>(value);
-    return std::move(ret);
+    return ret;
  } else if (utils::isa<double>(value)) {
    MS_LOG(DEBUG) << "double";
    py::float_ ret = utils::cast<double>(value);
-    return std::move(ret);
+    return ret;
  } else {
    MS_LOG(DEBUG) << "bool";
    py::bool_ ret = utils::cast<bool>(value);
-    return std::move(ret);
+    return ret;
  }
}

@@ -562,12 +562,11 @@ ShapeVector ConvertToShapeVector(const VectorRef &value_list, size_t index) {
  ShapeVector shape;
  if (index >= value_list.size()) {
    MS_LOG(EXCEPTION) << "Index " << index << " is out of range of " << value_list.size();
-    return shape;
  }
  BaseRef ref = value_list[index];
  MS_EXCEPTION_IF_NULL(ref);

-  auto converter = [](BaseRef ref) {
+  auto converter = [](const BaseRef &ref) {
    auto tensorptr = utils::cast<tensor::TensorPtr>(ref);
    MS_EXCEPTION_IF_NULL(tensorptr);
    if (tensorptr->DataDim() != 0) {
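Two things change above: the unreachable `return shape;` after the throwing MS_LOG(EXCEPTION) is dropped, and the converter lambda now takes `const BaseRef &` instead of a by-value BaseRef, so elements are no longer copied on every call. A short sketch of the by-value versus by-const-reference distinction using a hypothetical element type; the `(void)` on std::transform mirrors the following hunk, which discards the returned output iterator explicitly.

  #include <algorithm>
  #include <iterator>
  #include <string>
  #include <vector>

  int main() {
    std::vector<std::string> words{"alpha", "beta", "gamma"};
    std::vector<std::size_t> lengths;

    // By value: each element is copied into the parameter just to read its size.
    auto by_value = [](std::string w) { return w.size(); };

    // By const reference: no copy, and the element cannot be modified.
    auto by_cref = [](const std::string &w) { return w.size(); };

    (void)std::transform(words.begin(), words.end(), std::back_inserter(lengths), by_cref);
    (void)by_value;  // silence the unused-variable warning; kept only for contrast
    return 0;
  }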
@@ -578,10 +577,10 @@ ShapeVector ConvertToShapeVector(const VectorRef &value_list, size_t index) {
  };

  if (utils::isa<tensor::Tensor>(ref)) {
-    std::transform(value_list.begin() + index, value_list.end(), std::back_inserter(shape), converter);
+    (void)std::transform(value_list.begin() + index, value_list.end(), std::back_inserter(shape), converter);
  } else if (utils::isa<VectorRef>(ref)) {
    VectorRef shape_ref = utils::cast<VectorRef>(ref);
-    std::transform(shape_ref.begin(), shape_ref.end(), std::back_inserter(shape), converter);
+    (void)std::transform(shape_ref.begin(), shape_ref.end(), std::back_inserter(shape), converter);
  } else if (utils::isa<ValueTuple>(ref)) {
    ValueTuplePtr shape_tuple = utils::cast<ValueTuplePtr>(ref);
    shape = ConvertShapeTupleToShapeVector(shape_tuple);