forked from mindspore-Ecosystem/mindspore
update clang format rule
parent 31a12009dd
commit c2b3360d69
@@ -94,7 +94,7 @@ PenaltyBreakString: 1000
 PenaltyBreakTemplateDeclaration: 10
 PenaltyExcessCharacter: 1000000
 PenaltyReturnTypeOnItsOwnLine: 200
-PointerAlignment: Left
+PointerAlignment: Right
 RawStringFormats:
   - Language: Cpp
     Delimiters:
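Note: PointerAlignment: Right attaches the pointer/reference symbol to the variable name rather than to the type, and that is what drives all of the code reformatting in the hunks below. A minimal before/after sketch (illustration only; the Accumulate declaration is hypothetical and not part of this commit):

// Old rule (PointerAlignment: Left) -- the * and & bind to the type:
//   void Accumulate(const double& value, double* sum);
// New rule (PointerAlignment: Right) -- the * and & bind to the name:
void Accumulate(const double &value, double *sum);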
@@ -1199,9 +1199,9 @@ FuncGraphPtr TensorSlice::GenerateFuncGraph(const AbstractBasePtrList& args_spec
 return ret_graph;
 }

-REGISTER_PYBIND_DEFINE(
-TupleAdd_, ([](const py::module* m) {
-(void)py::class_<TupleAdd, MetaFuncGraph, std::shared_ptr<TupleAdd>>(*m, "TupleAdd_").def(py::init<std::string&>());
+REGISTER_PYBIND_DEFINE(TupleAdd_, ([](const py::module *m) {
+(void)py::class_<TupleAdd, MetaFuncGraph, std::shared_ptr<TupleAdd>>(*m, "TupleAdd_")
+.def(py::init<std::string &>());
 }));

 REGISTER_PYBIND_DEFINE(TupleSlice_, ([](const py::module *m) {

@@ -161,8 +161,8 @@ class CostGraph {
 // Applying Triangle Elimination in DP algorithm. return the left_node
 OperatorInfoPtr EliminationTriangle(const OperatorInfoPtr &elimi_op, const EdgePtr &edge_left_right);
 void CreateTriangleEliminationCostList(const OperatorInfoPtr &, const CostPtrList &, const CostPtrList &,
-const StrategyPtr&, const StrategyPtr&, const StrategyPtr&, const CostPtrList&,
-const CostPtrList&, const CostPtrList&, CostPtrList*);
+const StrategyPtr &, const StrategyPtr &, const StrategyPtr &,
+const CostPtrList &, const CostPtrList &, const CostPtrList &, CostPtrList *);
 // Given the relevant costlist, create the TriangleElimination cost
 void CreateTriangleEliminationSubCostList(StrategyPtr, StrategyPtr, StrategyPtr, const CostPtr &, const CostPtrList &,
 const CostPtrList &, const CostPtr &, const CostPtrList &, CostPtrList *);
@@ -170,11 +170,11 @@ class CostGraph {
 // Applying the Star Elimination in DP algorithm. Return the successive edges of this merged_op
 // NOTE: this elimination MUST be performed only when the above 5 operation cannot be applied.
 std::vector<EdgePtr> EliminationStar(const OperatorInfoPtr &op);
-void CreateStarEliminationCostList(std::vector<EdgePtr>&, const StrategyPtr&, const CostPtrList&, const CostPtrList&,
-const StrategyPtr&, const CostPtrList&, CostPtrList*);
-void CreateStarEliminationSubCostList(const StrategyPtr&, const CostPtrList&, const CostPtrList&, const StrategyPtr&,
-const CostPtrList&, std::vector<StrategyPtr>, CostPtrList&, CostPtrList&,
-CostPtrList*);
+void CreateStarEliminationCostList(std::vector<EdgePtr> &, const StrategyPtr &, const CostPtrList &,
+const CostPtrList &, const StrategyPtr &, const CostPtrList &, CostPtrList *);
+void CreateStarEliminationSubCostList(const StrategyPtr &, const CostPtrList &, const CostPtrList &,
+const StrategyPtr &, const CostPtrList &, std::vector<StrategyPtr>,
+CostPtrList &, CostPtrList &, CostPtrList *);
 // When the input of a operator is neither a WEIGHT, nor a output of a subsequent operator involving WEIGHT, then
 // the memory cost can be resused.
 Status CalculateOpsMemoryCost();

@@ -195,7 +195,8 @@ double ActivationCost::GetBackwardComputationCost(const std::vector<TensorInfo>&
 }

 // Return the per device communication cost in the forward phase.
-double SoftmaxCost::GetForwardCommCost(const std::vector<TensorInfo>&, const std::vector<TensorInfo>&, int32_t) const {
+double SoftmaxCost::GetForwardCommCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &,
+int32_t) const {
 // In the forward phase, the communication cost = 0
 return 0.0;
 }
@@ -262,7 +263,8 @@ double TmpIdentityCost::GetForwardComputationCost(const std::vector<mindspore::p
 // Return the per device computation cost in the backward phase. The cost is calculated according to the bytes
 // this operator uses
 double TmpIdentityCost::GetBackwardComputationCost(const std::vector<mindspore::parallel::TensorInfo> &,
-const std::vector<mindspore::parallel::TensorInfo>&, int32_t) const {
+const std::vector<mindspore::parallel::TensorInfo> &,
+int32_t) const {
 return 0.0;
 }

@@ -386,7 +388,8 @@ double OneHotCost::GetForwardCommCost(const std::vector<TensorInfo>&, const std:
 }

 // return the per device communication cost in the backward phase.
-double OneHotCost::GetBackwardCommCost(const std::vector<TensorInfo>&, const std::vector<TensorInfo>&, int32_t) const {
+double OneHotCost::GetBackwardCommCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &,
+int32_t) const {
 // onehot does not need communication in the backward phase
 return 0.0;
 }
@@ -509,8 +512,8 @@ double ArithmeticCost::GetForwardComputationCost(const std::vector<TensorInfo>&
 return result;
 }

-double ArithmeticCost::GetBackwardComputationCost(const std::vector<TensorInfo>& inputs, const std::vector<TensorInfo>&,
-int32_t stage_id) const {
+double ArithmeticCost::GetBackwardComputationCost(const std::vector<TensorInfo> &inputs,
+const std::vector<TensorInfo> &, int32_t stage_id) const {
 double result = 0.0;
 CheckGlobalDeviceManager();
 MS_EXCEPTION_IF_NULL(g_device_manager);
@@ -689,7 +692,8 @@ double DropOutCost::GetForwardComputationCost(const std::vector<TensorInfo>& inp
 }

 // return the per device communication cost in the forward phase.
-double GatherV2Cost::GetForwardCommCost(const std::vector<TensorInfo>&, const std::vector<TensorInfo>&, int32_t) const {
+double GatherV2Cost::GetForwardCommCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &,
+int32_t) const {
 // GatherV2Cost does not need communication in the forward phase
 return 0.0;
 }

@@ -56,7 +56,9 @@ class CustomOperator : public Operator {

 void CustomOutputRegister(const string &name) { Operator::OutputRegister(name); }

-void CustomInferFuncRegister(const std::function<graphStatus(Operator&)>& func) { Operator::InferFuncRegister(func); }
+void CustomInferFuncRegister(const std::function<graphStatus(Operator &)> &func) {
+Operator::InferFuncRegister(func);
+}
 };
 } // namespace ge
