forked from mindspore-Ecosystem/mindspore
update clang-format rule
parent: 31a12009dd
commit: c2b3360d69
@@ -94,7 +94,7 @@ PenaltyBreakString: 1000
 PenaltyBreakTemplateDeclaration: 10
 PenaltyExcessCharacter: 1000000
 PenaltyReturnTypeOnItsOwnLine: 200
-PointerAlignment: Left
+PointerAlignment: Right
 RawStringFormats:
   - Language: Cpp
     Delimiters:
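The substantive change in the rule file is the switch from PointerAlignment: Left to PointerAlignment: Right; every C++ hunk that follows is the mechanical reformatting that falls out of it. A toy snippet (not from this repository) illustrating what the option controls:

// With PointerAlignment: Left, clang-format binds * and & to the type:
int* count;
void Resize(const std::string& name, char** argv);

// With PointerAlignment: Right, it binds them to the declared name, which is
// exactly the "Type &arg" / "Type *arg" shape in all the reformatted hunks below:
int *count;
void Resize(const std::string &name, char **argv);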
@@ -1199,9 +1199,9 @@ FuncGraphPtr TensorSlice::GenerateFuncGraph(const AbstractBasePtrList& args_spec
   return ret_graph;
 }
 
-REGISTER_PYBIND_DEFINE(
-  TupleAdd_, ([](const py::module* m) {
-    (void)py::class_<TupleAdd, MetaFuncGraph, std::shared_ptr<TupleAdd>>(*m, "TupleAdd_").def(py::init<std::string&>());
-  }));
+REGISTER_PYBIND_DEFINE(TupleAdd_, ([](const py::module *m) {
+                         (void)py::class_<TupleAdd, MetaFuncGraph, std::shared_ptr<TupleAdd>>(*m, "TupleAdd_")
+                           .def(py::init<std::string &>());
+                       }));
 
 REGISTER_PYBIND_DEFINE(TupleSlice_, ([](const py::module *m) {
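For context on the macro being rewrapped above: its body is an ordinary pybind11 class binding. A self-contained sketch of the same pattern, using illustrative names (TupleOp, a module called example) rather than MindSpore's actual types:

#include <memory>
#include <string>
#include <pybind11/pybind11.h>

namespace py = pybind11;

class TupleOp {
 public:
  explicit TupleOp(const std::string &name) : name_(name) {}
  const std::string &name() const { return name_; }

 private:
  std::string name_;
};

PYBIND11_MODULE(example, m) {
  // Same shape as the hunk: register the class with a string constructor.
  (void)py::class_<TupleOp, std::shared_ptr<TupleOp>>(m, "TupleOp")
    .def(py::init<std::string>())
    .def("name", &TupleOp::name);
}

From Python this would be used as: import example; example.TupleOp("t").name().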
@@ -161,8 +161,8 @@ class CostGraph {
   // Applying Triangle Elimination in DP algorithm. return the left_node
   OperatorInfoPtr EliminationTriangle(const OperatorInfoPtr &elimi_op, const EdgePtr &edge_left_right);
   void CreateTriangleEliminationCostList(const OperatorInfoPtr &, const CostPtrList &, const CostPtrList &,
-                                         const StrategyPtr&, const StrategyPtr&, const StrategyPtr&, const CostPtrList&,
-                                         const CostPtrList&, const CostPtrList&, CostPtrList*);
+                                         const StrategyPtr &, const StrategyPtr &, const StrategyPtr &,
+                                         const CostPtrList &, const CostPtrList &, const CostPtrList &, CostPtrList *);
   // Given the relevant costlist, create the TriangleElimination cost
   void CreateTriangleEliminationSubCostList(StrategyPtr, StrategyPtr, StrategyPtr, const CostPtr &, const CostPtrList &,
                                             const CostPtrList &, const CostPtr &, const CostPtrList &, CostPtrList *);

@@ -170,11 +170,11 @@ class CostGraph {
   // Applying the Star Elimination in DP algorithm. Return the successive edges of this merged_op
   // NOTE: this elimination MUST be performed only when the above 5 operation cannot be applied.
   std::vector<EdgePtr> EliminationStar(const OperatorInfoPtr &op);
-  void CreateStarEliminationCostList(std::vector<EdgePtr>&, const StrategyPtr&, const CostPtrList&, const CostPtrList&,
-                                     const StrategyPtr&, const CostPtrList&, CostPtrList*);
-  void CreateStarEliminationSubCostList(const StrategyPtr&, const CostPtrList&, const CostPtrList&, const StrategyPtr&,
-                                        const CostPtrList&, std::vector<StrategyPtr>, CostPtrList&, CostPtrList&,
-                                        CostPtrList*);
+  void CreateStarEliminationCostList(std::vector<EdgePtr> &, const StrategyPtr &, const CostPtrList &,
+                                     const CostPtrList &, const StrategyPtr &, const CostPtrList &, CostPtrList *);
+  void CreateStarEliminationSubCostList(const StrategyPtr &, const CostPtrList &, const CostPtrList &,
+                                        const StrategyPtr &, const CostPtrList &, std::vector<StrategyPtr>,
+                                        CostPtrList &, CostPtrList &, CostPtrList *);
   // When the input of a operator is neither a WEIGHT, nor a output of a subsequent operator involving WEIGHT, then
   // the memory cost can be resused.
   Status CalculateOpsMemoryCost();
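The two hunks above only rewrap declarations, but the methods belong to the auto-parallel cost graph's DP eliminations. A heavily simplified sketch of the triangle-elimination idea (my own reconstruction of the general technique, not MindSpore's implementation): when operator v forms a triangle with u and w, v can be removed by folding its best-case cost into the u-w edge for every pair of u/w strategies.

#include <algorithm>
#include <cstddef>
#include <limits>
#include <vector>

using Cost = double;

// cost_v[su][sv][sw]: cost of op v plus its two edges u-v and v-w, given the
// strategies chosen for u, v and w. cost_uw[su][sw]: existing u-w edge cost.
void EliminateTriangle(const std::vector<std::vector<std::vector<Cost>>> &cost_v,
                       std::vector<std::vector<Cost>> *cost_uw) {
  for (std::size_t su = 0; su < cost_uw->size(); ++su) {
    for (std::size_t sw = 0; sw < (*cost_uw)[su].size(); ++sw) {
      Cost best = std::numeric_limits<Cost>::infinity();
      for (std::size_t sv = 0; sv < cost_v[su].size(); ++sv) {
        best = std::min(best, cost_v[su][sv][sw]);
      }
      (*cost_uw)[su][sw] += best;  // v is eliminated; its cheapest strategy is absorbed
    }
  }
}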
@@ -195,7 +195,8 @@ double ActivationCost::GetBackwardComputationCost(const std::vector<TensorInfo>&
 }
 
 // Return the per device communication cost in the forward phase.
-double SoftmaxCost::GetForwardCommCost(const std::vector<TensorInfo>&, const std::vector<TensorInfo>&, int32_t) const {
+double SoftmaxCost::GetForwardCommCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &,
+                                       int32_t) const {
   // In the forward phase, the communication cost = 0
   return 0.0;
 }
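This hunk and the remaining cost-model hunks all follow one pattern: each per-operator cost class overrides per-device cost hooks, and operators with no communication in a phase simply return 0.0. A stripped-down sketch of that shape (simplified interfaces under my own assumptions, not the real MindSpore headers):

#include <cstdint>
#include <vector>

struct TensorInfo {};  // stand-in for the real mindspore::parallel::TensorInfo

class OperatorCost {
 public:
  virtual ~OperatorCost() = default;
  // Per-device communication cost in the forward phase.
  virtual double GetForwardCommCost(const std::vector<TensorInfo> &inputs, const std::vector<TensorInfo> &outputs,
                                    int32_t stage_id) const = 0;
};

class SoftmaxLikeCost : public OperatorCost {
 public:
  // Softmax-style ops exchange nothing in the forward phase, so the cost is 0.
  double GetForwardCommCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &,
                            int32_t) const override {
    return 0.0;
  }
};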
@@ -262,7 +263,8 @@ double TmpIdentityCost::GetForwardComputationCost(const std::vector<mindspore::p
 // Return the per device computation cost in the backward phase. The cost is calculated according to the bytes
 // this operator uses
 double TmpIdentityCost::GetBackwardComputationCost(const std::vector<mindspore::parallel::TensorInfo> &,
-                                                   const std::vector<mindspore::parallel::TensorInfo>&, int32_t) const {
+                                                   const std::vector<mindspore::parallel::TensorInfo> &,
+                                                   int32_t) const {
   return 0.0;
 }
@@ -386,7 +388,8 @@ double OneHotCost::GetForwardCommCost(const std::vector<TensorInfo>&, const std:
 }
 
 // return the per device communication cost in the backward phase.
-double OneHotCost::GetBackwardCommCost(const std::vector<TensorInfo>&, const std::vector<TensorInfo>&, int32_t) const {
+double OneHotCost::GetBackwardCommCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &,
+                                       int32_t) const {
   // onehot does not need communication in the backward phase
   return 0.0;
 }
@@ -509,8 +512,8 @@ double ArithmeticCost::GetForwardComputationCost(const std::vector<TensorInfo>&
   return result;
 }
 
-double ArithmeticCost::GetBackwardComputationCost(const std::vector<TensorInfo>& inputs, const std::vector<TensorInfo>&,
-                                                  int32_t stage_id) const {
+double ArithmeticCost::GetBackwardComputationCost(const std::vector<TensorInfo> &inputs,
+                                                  const std::vector<TensorInfo> &, int32_t stage_id) const {
   double result = 0.0;
   CheckGlobalDeviceManager();
   MS_EXCEPTION_IF_NULL(g_device_manager);
@@ -689,7 +692,8 @@ double DropOutCost::GetForwardComputationCost(const std::vector<TensorInfo>& inp
 }
 
 // return the per device communication cost in the forward phase.
-double GatherV2Cost::GetForwardCommCost(const std::vector<TensorInfo>&, const std::vector<TensorInfo>&, int32_t) const {
+double GatherV2Cost::GetForwardCommCost(const std::vector<TensorInfo> &, const std::vector<TensorInfo> &,
+                                        int32_t) const {
   // GatherV2Cost does not need communication in the forward phase
   return 0.0;
 }
@@ -56,7 +56,9 @@ class CustomOperator : public Operator {
 
   void CustomOutputRegister(const string &name) { Operator::OutputRegister(name); }
 
-  void CustomInferFuncRegister(const std::function<graphStatus(Operator&)>& func) { Operator::InferFuncRegister(func); }
+  void CustomInferFuncRegister(const std::function<graphStatus(Operator &)> &func) {
+    Operator::InferFuncRegister(func);
+  }
 };
 }  // namespace ge
 
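The method reformatted above forwards a std::function callback to a protected base-class hook. A minimal, runnable sketch of that registration pattern (illustrative types under my own assumptions; graphStatus and the Operator internals are simplified, not GE's real API):

#include <functional>
#include <iostream>

using graphStatus = int;  // illustrative; the real GE typedef differs

class Operator {
 protected:
  void InferFuncRegister(const std::function<graphStatus(Operator &)> &func) { infer_func_ = func; }

 public:
  graphStatus RunInfer() { return infer_func_ ? infer_func_(*this) : -1; }

 private:
  std::function<graphStatus(Operator &)> infer_func_;
};

class CustomOperator : public Operator {
 public:
  // Same shape as the reformatted method above: expose the protected hook.
  void CustomInferFuncRegister(const std::function<graphStatus(Operator &)> &func) {
    Operator::InferFuncRegister(func);
  }
};

int main() {
  CustomOperator op;
  op.CustomInferFuncRegister([](Operator &) -> graphStatus { return 0; });
  std::cout << "infer status: " << op.RunInfer() << "\n";
  return 0;
}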