!23702 pclint clean
Merge pull request !23702 from zhangzhaoju/master_pclint
commit c60e900000

@@ -92,7 +92,7 @@ class AnfExporter {
   void OutputParameters(std::ofstream &ofs, const std::vector<AnfNodePtr> &parameters,
                         OrderedMap<AnfNodePtr, int, ParamPtrHasher, ParamPtrEqual> *param_map);

-  void OutputStatementComment(std::ofstream &ofs, const CNodePtr &node);
+  virtual void OutputStatementComment(std::ofstream &ofs, const CNodePtr &node);
   void OutputOrderList(std::ofstream &ofs, const FuncGraphPtr &func_graph);

   void OutputCNodeText(std::ofstream &ofs, const CNodePtr &cnode, const FuncGraphPtr &func_graph, int *idx,
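
Review note: pclint flags the original declaration because AnalyzeFailExporter (next hunk) re-declares OutputStatementComment; without virtual in the base, the derived version hides the method instead of overriding it. The base declaration gains virtual here and the derived one gains override below; a minimal sketch of the pattern follows the next hunk.
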
@@ -140,15 +140,16 @@ class AnalyzeFailExporter : public AnfExporter {
   void OutputCNode(std::ofstream &ofs, const CNodePtr &cnode, const FuncGraphPtr &func_graph, int *idx,
                    std::map<AnfNodePtr, int> *const apply_map) override;

- private:
+ protected:
   std::string GetNodeType(const AnfNodePtr &nd) override;
   AbstractBasePtr GetNodeAbstract(const AnfNodePtr &nd);
   AnfNodeConfigPtr GetForwardConfig(const AnfNodeConfigPtr &cfg);
   void ProcessFuncGraphCall(const CNodePtr &node, std::string *const op_comment);
-  void OutputStatementComment(std::ofstream &ofs, const CNodePtr &node);
+  void OutputStatementComment(std::ofstream &ofs, const CNodePtr &node) override;
   std::unordered_map<FuncGraphPtr, TaggedNodeMap> CreateTaggedNodeMap(
     const std::vector<abstract::AnfNodeConfigPtr> &node_config_stack);

+ private:
   AnalysisContextPtr current_context_ = nullptr;
   AnalysisEnginePtr engine_ = nullptr;
 };
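
Review note: the two changes above work as a pair. Marking the base method virtual and the derived one override lets the compiler (and lint) verify that the signatures match, and moving the overridable helpers from private: to protected: keeps their access consistent with the base class while the plain data members stay private. A minimal sketch of the pattern, using hypothetical Exporter/FailExporter names rather than the real MindSpore classes:

    #include <fstream>

    class Exporter {
     public:
      virtual ~Exporter() = default;
      void Run(std::ofstream &ofs) { OutputComment(ofs); }  // dispatches dynamically

     protected:
      // Overridable hook: virtual in the base, so derived versions are used.
      virtual void OutputComment(std::ofstream &ofs) { ofs << "# base\n"; }
    };

    class FailExporter : public Exporter {
     protected:
      // 'override' makes the compiler check for a matching virtual base method.
      void OutputComment(std::ofstream &ofs) override { ofs << "# failures: " << fail_count_ << "\n"; }

     private:
      int fail_count_ = 0;  // non-overridable state stays private
    };

    int main() {
      std::ofstream ofs("out.txt");
      FailExporter exporter;
      exporter.Run(ofs);  // writes "# failures: 0" via the override
    }
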
@@ -39,7 +39,6 @@ namespace ad {
 std::unordered_map<FuncGraphPtr, DFunctorPtr> DFunctor::func_graph_to_functor_;
 std::unordered_map<AnfNodePtr, AdjointPtr> DFunctor::anfnode_to_adjoin_definition_;

-std::shared_ptr<PynativeDFunctor> py_dfunctor = std::make_shared<PynativeDFunctor>();
 bool lift_fv_before_grad = true;

 DFunctor::DFunctor(const FuncGraphPtr &primal_graph, const pipeline::ResourceBasePtr &resources)

@@ -295,7 +294,7 @@ AdjointPtr DFunctor::MapMorphism(const AnfNodePtr &morph) {
     auto pynative_exec = pynative::PynativeExecutor::GetInstance();
     auto grad_exec = pynative_exec->grad_executor();
     if (grad_exec->eliminate_forward()) {
-      py_dfunctor->ReplaceEquivdout(k_app, cnode_morph);
+      PynativeDFunctor::ReplaceEquivdout(k_app, cnode_morph);
       cnode_morph->clear_inputs_value();
     }
   }
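
Review note: the deleted py_dfunctor was a file-scope std::shared_ptr with a dynamic initializer, which lint reports as a non-trivially-constructed global. With every PynativeDFunctor member made static (next hunk), the call site can use the class-qualified form and the global disappears.
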
@@ -27,15 +27,15 @@ namespace mindspore {
 namespace ad {
 class PynativeDFunctor {
  public:
-  ValueNodePtr GenNewTensor(const CNodePtr &forward_node);
-  tensor::TensorPtr GenNewTensorInner(const TypePtr &type_elem, const BaseShapePtr &shape_elem);
-  void GetForwardOutNodeAndBpropGraph(const CNodePtr &k_app, CNodePtr *forward_node, FuncGraphPtr *bprop_graph,
-                                      FuncGraphPtr *fprop_graph);
-  std::vector<AnfNodePtr> RunOutputReplace(const CNodePtr &forward_node, const FuncGraphPtr &bprop_graph,
-                                           const FuncGraphPtr &fprop_graph, const CNodePtr &cnode_morph);
-  std::vector<AnfNodePtr> RunInputReplace(const FuncGraphPtr &bprop_graph, const FuncGraphPtr &fprop_graph,
-                                          const CNodePtr &cnode_morph);
-  void ReplaceEquivdout(const CNodePtr &k_app, const CNodePtr &cnode_morph);
+  static ValueNodePtr GenNewTensor(const CNodePtr &forward_node);
+  static tensor::TensorPtr GenNewTensorInner(const TypePtr &type_elem, const BaseShapePtr &shape_elem);
+  static void GetForwardOutNodeAndBpropGraph(const CNodePtr &k_app, CNodePtr *forward_node, FuncGraphPtr *bprop_graph,
+                                             FuncGraphPtr *fprop_graph);
+  static std::vector<AnfNodePtr> RunOutputReplace(const CNodePtr &forward_node, const FuncGraphPtr &bprop_graph,
+                                                  const FuncGraphPtr &fprop_graph, const CNodePtr &cnode_morph);
+  static std::vector<AnfNodePtr> RunInputReplace(const FuncGraphPtr &bprop_graph, const FuncGraphPtr &fprop_graph,
+                                                 const CNodePtr &cnode_morph);
+  static void ReplaceEquivdout(const CNodePtr &k_app, const CNodePtr &cnode_morph);
 };
 }  // namespace ad
 }  // namespace mindspore
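
Review note: none of these helpers touch instance state, so declaring them static documents that fact and lets MapMorphism call them without constructing an object. A minimal sketch of the same conversion, with a hypothetical ShapeUtil class (not the real API):

    #include <vector>

    class ShapeUtil {
     public:
      // Stateless helper: static, so no instance (or global) is needed to call it.
      static std::vector<int> PadToRank(std::vector<int> shape, int rank) {
        while (static_cast<int>(shape.size()) < rank) shape.push_back(1);
        return shape;
      }
    };

    int main() {
      // Class-qualified call, mirroring PynativeDFunctor::ReplaceEquivdout(...) above.
      auto padded = ShapeUtil::PadToRank({2, 3}, 4);
      return static_cast<int>(padded.size()) - 4;  // 0 on success
    }
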
@@ -161,7 +161,7 @@ class TensorDataNumpy : public TensorData {
   const void *const_data() const override { return buffer_.ptr; }

   /// To string.
-  std::string ToString(const TypeId type, const ShapeVector &shape, bool use_comma) const override {
+  std::string ToString(const TypeId, const ShapeVector &, bool use_comma) const override {
     if (use_comma) {
       // Call python np.array2string(data_, separator=', ') to convert string with comma.
       py::dict kwargs;
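
Review note: this override only reads use_comma, so the unused type and shape parameters are left unnamed; the signature still matches the base class, but lint stops reporting unused parameters. The same idiom in a standalone sketch (hypothetical Formatter types):

    #include <string>

    struct Formatter {
      virtual ~Formatter() = default;
      virtual std::string ToString(int type_id, bool use_comma) const = 0;
    };

    struct NumpyFormatter : Formatter {
      // Unused parameter left unnamed; the comment keeps the intent readable.
      std::string ToString(int /*type_id*/, bool use_comma) const override {
        return use_comma ? "1, 2, 3" : "1 2 3";
      }
    };
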
@@ -192,7 +192,7 @@ class MS_CORE_API PartialAbstractClosure : public AbstractFuncAtom {
   MS_DECLARE_PARENT(PartialAbstractClosure, AbstractFuncAtom)

   AbstractFunctionPtr fn() { return fn_; }
-  AbstractBasePtrList &args() { return args_spec_list_; }
+  const AbstractBasePtrList &args() { return args_spec_list_; }
   ValuePtr RealBuildValue() const override { return fn_->BuildValue(); }
   AnfNodePtr node() { return node_.lock(); }
   void set_node(const AnfNodePtr &node) { node_ = AnfNodeWeakPtr(node); }
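
Review note: returning AbstractBasePtrList & lets any caller mutate args_spec_list_ in place; adding const to the return type makes the accessor read-only without changing callers that only read. Sketch with a hypothetical stand-in class:

    #include <vector>

    class PartialClosure {
     public:
      // Before: std::vector<int> &args() { return args_; }  // callers could mutate args_
      const std::vector<int> &args() { return args_; }  // read-only, as in the hunk

     private:
      std::vector<int> args_{1, 2, 3};
    };

    int main() {
      PartialClosure pc;
      // pc.args().push_back(4);  // would no longer compile: the reference is const
      return static_cast<int>(pc.args().size()) - 3;  // 0 on success
    }
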
@@ -349,7 +349,7 @@ void FilterMonadInput(const AnfNodePtrList &old_inputs, AnfNodePtrList *new_inputs,
                       AnfNodePtr *possible_io_monad) {
   AnfNodePtr local_u_monad = nullptr, local_io_monad = nullptr;
   std::copy_if(old_inputs.cbegin(), old_inputs.cend(), std::back_inserter(*new_inputs),
-               [&local_u_monad, &local_io_monad](const auto &input) {
+               [&local_u_monad, &local_io_monad](const auto &input) -> bool {
                  if (HasAbstractUMonad(input)) {
                    if (local_u_monad != nullptr) {
                      MS_LOG(EXCEPTION) << "Cannot have multiple U Monad in one call, first: "
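
Review note: std::copy_if needs a predicate whose result converts to bool; writing -> bool spells out the lambda's return type instead of leaving it to deduction across multiple return statements, which is what the lint pass asks for here. A self-contained sketch of the same change:

    #include <algorithm>
    #include <iterator>
    #include <vector>

    int main() {
      const std::vector<int> inputs{1, 2, 3, 4, 5, 6};
      std::vector<int> evens;
      // Explicit '-> bool', as in the FilterMonadInput lambda above.
      std::copy_if(inputs.cbegin(), inputs.cend(), std::back_inserter(evens),
                   [](const auto &v) -> bool { return v % 2 == 0; });
      return static_cast<int>(evens.size()) - 3;  // 0 on success
    }
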
@@ -326,7 +326,11 @@ def tensor_index_by_tensor(data, tensor_index):
     """Tensor getitem by a single tensor"""
     min_data_dim, max_data_dim = 0, 7
     const_utils.judge_data_dim(data.ndim, min_data_dim, max_data_dim)
-    const_utils.check_type_valid(F.dtype(tensor_index), mstype.int_type, const_utils.TENSOR_GETITEM)
+    invalid = const_utils.check_type_invalid(F.dtype(tensor_index), mstype.int_type)
+    if invalid:
+        exp_msg = const_utils.gen_exception_msg(
+            "The tensor index must be int type, but got {}.", F.dtype(tensor_index))
+        const_utils.raise_index_error(exp_msg)
     return F.gather(data, tensor_index, 0)

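
Review note: in both indexing helpers, the one-shot const_utils.check_type_valid call is replaced by an explicit check_type_invalid test plus const_utils.raise_index_error, so an invalid tensor-index dtype now surfaces as an IndexError whose formatted message names the offending dtype, rather than the generic check failure. The hunk below applies the same rewrite inside _tensor_getitem_by_tuple.
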
@@ -427,7 +431,6 @@ def _tensor_getitem_by_tuple(data, tuple_index, op_name):
             tensor_indexes.append(tensor_index)
             tensor_positions += (i,)
         elif i in tensor_positions:
-            const_utils.check_type_valid(F.dtype(index), mstype.int_type, op_name)
+            invalid = const_utils.check_type_invalid(F.dtype(index), mstype.int_type)
+            if invalid:
+                exp_msg = const_utils.gen_exception_msg(