clean code

reku1997 2022-07-11 17:13:56 +08:00
parent 4a81173b1b
commit ffbdfb8cfa
27 changed files with 214 additions and 207 deletions

View File

@@ -53,9 +53,9 @@ class MS_API GraphCell final : public Cell<GraphCell> {
GraphCell() = default;
~GraphCell() override = default;
explicit GraphCell(const Graph &);
explicit GraphCell(Graph &&);
explicit GraphCell(const std::shared_ptr<Graph> &);
explicit GraphCell(const Graph &graph);
explicit GraphCell(Graph &&graph);
explicit GraphCell(const std::shared_ptr<Graph> &graph);
void SetContext(const std::shared_ptr<Context> &context);
const std::shared_ptr<Graph> &GetGraph() const { return graph_; }
@@ -76,7 +76,7 @@ class MS_API InputAndOutput {
InputAndOutput();
~InputAndOutput() = default;
InputAndOutput(const std::shared_ptr<CellBase> &, const std::vector<InputAndOutput> &, int32_t index);
InputAndOutput(const std::shared_ptr<CellBase> &cell, const std::vector<InputAndOutput> &prev, int32_t index);
int32_t GetIndex() const { return index_; }
void SetIndex(int32_t index) { index_ = index; }
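The two hunks above are purely declarative: constructor and function parameters gain names, so the header documents itself without changing the ABI or overload resolution. A minimal sketch of the pattern, with a hypothetical stand-in for the real mindspore::Graph type:

#include <memory>
#include <utility>

struct Graph {};  // hypothetical stand-in for mindspore::Graph

class GraphCell {
 public:
  // Named parameters make the declaration self-documenting; the compiler
  // ignores the names when matching definitions and overloads.
  explicit GraphCell(const Graph &graph) : graph_(std::make_shared<Graph>(graph)) {}
  explicit GraphCell(Graph &&graph) : graph_(std::make_shared<Graph>(std::move(graph))) {}
  explicit GraphCell(const std::shared_ptr<Graph> &graph) : graph_(graph) {}

 private:
  std::shared_ptr<Graph> graph_;
};

int main() {
  Graph g;
  GraphCell cell(g);  // binds to the const-lvalue overload
  return 0;
}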

View File

@@ -649,13 +649,13 @@ void Somas::InitSomasEventInfos() {
#ifdef ENABLE_D
send_recv_map = device::ascend::AscendStreamAssign::GetInstance().get_event_map();
#endif
for (auto &send_recv : send_recv_map) {
for (const auto &send_recv : send_recv_map) {
size_t event_id = common::AnfAlgo::GetNodeAttr<uint32_t>(send_recv.first, kAttrEventId);
event_map_[event_id] = std::make_pair(send_recv.first, send_recv.second);
}
auto tensor_index = tensors_list_.size();
for (auto &event : event_map_) {
for (const auto &event : event_map_) {
std::pair<CNodePtr, CNodePtr> send_recv_pair = event.second;
auto send_iter = nodes_map_.find(send_recv_pair.first.get());
auto recv_iter = nodes_map_.find(send_recv_pair.second.get());
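Most of the somas.cc hunks apply one mechanical change: loop variables that are only read become const auto &, which avoids copying the pair each map iteration yields and lets the compiler reject accidental writes. A small self-contained illustration (not MindSpore code):

#include <cstdio>
#include <map>
#include <string>

int main() {
  std::map<int, std::string> event_map = {{0, "send"}, {1, "recv"}};
  // Plain `auto &event` would also avoid the copy, but `const auto &`
  // additionally documents and enforces that the loop never mutates
  // the element.
  for (const auto &event : event_map) {
    std::printf("event %d -> %s\n", event.first, event.second.c_str());
  }
  return 0;
}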
@@ -686,7 +686,7 @@ void Somas::InitSomasEventInfos() {
SomasParameterPtr Somas::CreateSomasParameter(const AnfNodePtr &node, size_t index) {
MS_EXCEPTION_IF_NULL(node);
auto id = parameters_list_.size();
const void *addr = 0;
const void *addr = nullptr;
size_t dev_size = 0;
if (AnfAlgo::OutputAddrExist(node, index)) {
auto device_addr = AnfAlgo::GetOutputAddr(node, index);
@@ -816,7 +816,7 @@ void Somas::SummaryInputProcess(const session::KernelGraph *graph) {
}
size_t total_summary_size = 0;
for (auto &node_item : summary_nodes) {
for (const auto &node_item : summary_nodes) {
auto origin_node = node_item.second.first;
size_t origin_index = IntToSize(node_item.second.second);
auto item_with_index = common::AnfAlgo::VisitKernelWithReturnType(origin_node, origin_index, true);
@@ -980,10 +980,10 @@ void Somas::GenContiguousList(const session::KernelGraph *graph) {
// Contiguous input
if ((!node->input_tensors_.empty()) && (!node->input_tensors_[0]->contiguous_)) {
if (node->input_tensors_[0]->aligned_size_) {
if (node->input_tensors_[0]->aligned_size_ != 0) {
node->input_tensors_[0]->aligned_size_ += kGapSize;
}
if (node->input_tensors_[node->input_tensors_.size() - 1]->aligned_size_) {
if (node->input_tensors_[node->input_tensors_.size() - 1]->aligned_size_ != 0) {
node->input_tensors_[node->input_tensors_.size() - 1]->aligned_size_ += kGapSize;
}
std::vector<size_t> inputs;
@@ -1002,10 +1002,10 @@ void Somas::GenContiguousList(const session::KernelGraph *graph) {
// Contiguous output
if ((!node->output_tensors_.empty()) && (!node->output_tensors_[0]->contiguous_)) {
if (node->output_tensors_[0]->aligned_size_) {
if (node->output_tensors_[0]->aligned_size_ != 0) {
node->output_tensors_[0]->aligned_size_ += kGapSize;
}
if (node->output_tensors_[node->output_tensors_.size() - 1]->aligned_size_) {
if (node->output_tensors_[node->output_tensors_.size() - 1]->aligned_size_ != 0) {
node->output_tensors_[node->output_tensors_.size() - 1]->aligned_size_ += kGapSize;
}
std::vector<size_t> outputs;
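The aligned_size_ guards in the two hunks above change from truthiness tests to explicit != 0 comparisons. For an arithmetic type the two compile identically; the explicit form simply states that the field is a size being compared against zero, not a flag. In miniature:

#include <cstddef>

int main() {
  size_t aligned_size = 512;
  const size_t kGapSize = 32;  // hypothetical gap constant, mirroring the diff
  if (aligned_size != 0) {  // explicit comparison instead of implicit bool conversion
    aligned_size += kGapSize;
  }
  return aligned_size == 544 ? 0 : 1;
}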
@@ -1039,7 +1039,7 @@ void Somas::BuildConflictInfo(const std::shared_ptr<SomasTensor> &tensor, Tensor
tensor_conflict_info->r.id = consumer_list.at(1);
} else {
tensor_conflict_info->l.index = destination_node_list->size();
destination_node_list->insert(destination_node_list->end(), consumer_list.begin(), consumer_list.end());
destination_node_list->insert(destination_node_list->cend(), consumer_list.cbegin(), consumer_list.cend());
tensor_conflict_info->r.index = destination_node_list->size();
}
}
@@ -1105,8 +1105,8 @@ void Somas::ComputeConflictPairs() {
MS_LOG(INFO) << "Threads Num is " << process_num;
int64_t start_index = 0;
int64_t total_size = candidate_tensor_list.size();
int64_t job_size = total_size / process_num;
int64_t total_size = SizeToLong(candidate_tensor_list.size());
int64_t job_size = total_size / SizeToLong(process_num);
if (job_size == 0) {
job_size = total_size;
}
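total_size holds a size_t that must become int64_t, so the commit routes it through SizeToLong instead of an implicit narrowing conversion. The real helper lives in MindSpore's utility headers; a hedged sketch of what such a checked conversion can look like:

#include <cstddef>
#include <cstdint>
#include <limits>
#include <stdexcept>

// Assumption: a checked size_t -> int64_t conversion in the spirit of
// MindSpore's SizeToLong (the real one reports failure via MS_LOG).
inline int64_t SizeToLong(size_t u) {
  if (u > static_cast<uint64_t>(std::numeric_limits<int64_t>::max())) {
    throw std::out_of_range("size_t value overflows int64_t");
  }
  return static_cast<int64_t>(u);
}

int main() {
  size_t count = 42;
  int64_t total_size = SizeToLong(count);  // explicit, checked, and greppable
  return total_size == 42 ? 0 : 1;
}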
@@ -1172,8 +1172,8 @@ void Somas::UpdateTensorDestinations() {
// Loop to add edges from end to beginning of next group
for (const auto &group : streams_groups_) {
for (size_t i = 1; i < group.size(); i++) {
int64_t previous_stream = group[i - 1];
int64_t current_stream = group[i];
size_t previous_stream = group[i - 1];
size_t current_stream = group[i];
auto stream = GetSomasStream(previous_stream);
if (stream == nullptr) {
@@ -1309,7 +1309,7 @@ bool Somas::Assign(const session::KernelGraph *graph) {
std::map<size_t, size_t> contiguous_list_with_ref_index_map = GetContiguousListContainRefTensor();
vector<vector<size_t>> contiguous_tensors_list_removed = contiguous_tensors_list_;
std::set<vector<size_t>> contiguous_tensors_list_to_remove;
for (auto ref_list_pair : contiguous_list_with_ref_index_map) {
for (const auto &ref_list_pair : contiguous_list_with_ref_index_map) {
contiguous_tensors_list_to_remove.insert(contiguous_tensors_list_[ref_list_pair.second]);
}
@@ -1330,7 +1330,7 @@ bool Somas::Assign(const session::KernelGraph *graph) {
}
}
for (auto contiguous_list : contiguous_tensors_list_to_remove) {
for (const auto &contiguous_list : contiguous_tensors_list_to_remove) {
auto iterator =
std::find(contiguous_tensors_list_removed.begin(), contiguous_tensors_list_removed.end(), contiguous_list);
if (iterator != contiguous_tensors_list_removed.end()) {
@@ -1397,7 +1397,7 @@ std::map<size_t, size_t> Somas::GetContiguousListContainRefTensor() {
std::map<size_t, size_t> contiguous_list_with_ref_index_map;
std::map<size_t, size_t> ref_tensors_in_contiguous_map = GetRefTensorsInContiguousList();
std::map<size_t, std::map<size_t, std::set<size_t>>> contiguous_ref_list_error_check_map;
for (auto ref_pair : ref_tensors_in_contiguous_map) {
for (const auto &ref_pair : ref_tensors_in_contiguous_map) {
size_t ref_first = ref_pair.first;
size_t ref_second = ref_pair.second;
bool found_first = false;
@@ -1449,10 +1449,10 @@ std::map<size_t, size_t> Somas::GetContiguousListContainRefTensor() {
}
}
for (auto check_list_pair : contiguous_ref_list_error_check_map) {
for (const auto &check_list_pair : contiguous_ref_list_error_check_map) {
auto first_list = check_list_pair.first;
auto index_set_map = check_list_pair.second;
for (auto index_set : index_set_map) {
for (const auto &index_set : index_set_map) {
auto second_list = index_set.first;
if (contiguous_tensors_list_[first_list].size() != contiguous_tensors_list_[second_list].size()) {
MS_LOG(WARNING) << "Contiguous lists " << first_list << " and " << second_list
@@ -1485,7 +1485,7 @@ std::map<size_t, size_t> Somas::GetRefTensorsInContiguousList() {
<< tensors_map_[ref_node_list[0]]->contiguous_ << ", " << ref_node_list[1] << ":"
<< tensors_map_[ref_node_list[1]]->contiguous_;
}
if (ref_node_list.size() == kRefNodeTensorNum && contiguous_in_ref_list == kRefNodeTensorNum) {
if (ref_node_list.size() == kRefNodeTensorNum && LongToSize(contiguous_in_ref_list) == kRefNodeTensorNum) {
ref_tensors_in_contiguous_map[ref_node_list[0]] = ref_node_list[1];
}
}
@@ -1721,13 +1721,13 @@ std::string Somas::Offline() const {
<< ", start=" << tensor->lifetime_.start_ << ", end=" << tensor->lifetime_.end_ << std::endl;
} else {
std::map<size_t, size_t> dest_node_streams;
for (auto dest_node : tensor->destination_nodes_) {
for (const auto &dest_node : tensor->destination_nodes_) {
auto node = GetSomasNode(tensor->GetSourceNodeId());
MS_EXCEPTION_IF_NULL(node);
(void)dest_node_streams.emplace(dest_node, node->GetStreamId());
}
for (auto dest_info : dest_node_streams) {
for (const auto &dest_info : dest_node_streams) {
oss << "Somas EDGE src=n" << tensor->GetSourceNodeId() << ", srcstm=" << tensor->GetSourceStreamId()
<< ", dst=n" << dest_info.first << ", dststm=" << dest_info.second
<< ", workspace=" << static_cast<int>(tensor->type_ == kWorkspace) << ", size=" << tensor->GetOriginalSize()
@@ -1811,7 +1811,7 @@ std::string Somas::SomasMemory() const {
auto node = GetSomasNode(place_tensor->GetSourceNodeId());
if (node != nullptr) {
scope_name = node->scope_full_name_;
src_stm_id = node->GetStreamId();
src_stm_id = SizeToLong(node->GetStreamId());
} else {
scope_name = "Somas Tensor";
}

View File

@@ -157,14 +157,14 @@ class Somas {
SomasParameterPtr GetSomasParameter(const AnfNodePtr &node, size_t index);
SomasParameterPtr CreateSomasParameter(const AnfNodePtr &node, size_t index);
void InitCommonNodeInputs(bool is_all_nop_node, const CNodePtr &kernel);
void InitAtomicCleanInputs(bool is_all_nop_node, const CNodePtr &kernel);
void InitAtomicCleanInputs(bool enable_fusion_clear, const CNodePtr &kernel);
void ComputeOneTensorConflicts(const std::shared_ptr<SomasTensor> &target_tensor,
const std::vector<TensorConflictInfo> &tensor_conflict_info,
const std::vector<TensorConflictInfo> &tensor_conflict_info_list,
const std::vector<size_t> &destination_node_list,
const vector<DynamicBitSet> &nodes_dependency,
std::vector<DynamicBitSet> *tensor_relation) const;
void ComputeMultiTensorConflicts(const std::vector<SomasTensorPtr> &target_tensors_list,
const std::vector<TensorConflictInfo> &tensor_conflict_info,
const std::vector<TensorConflictInfo> &tensor_conflict_info_list,
const std::vector<size_t> &destination_node_list,
const vector<DynamicBitSet> &nodes_dependency,
std::vector<DynamicBitSet> *tensor_relation) const;

View File

@@ -36,7 +36,7 @@ bool WorstFit(const pair<size_t, size_t> &a, const pair<size_t, size_t> &b) {
return a.second > b.second || (a.second == b.second && a.first < b.first);
}
size_t SharedObjects(FootPrint *p) { return p->Next()->getOffset(); }
size_t SingleObject(FootPrint *p) { return SIZE_MAX; }
size_t SingleObject(FootPrint *) { return SIZE_MAX; }
bool (*g_pBranching[kNumFittingTypes])(const pair<size_t, size_t> &a, const pair<size_t, size_t> &b) = {
BestFit, SmallestFit
@@ -77,8 +77,6 @@ bool FootPrint::findFirst(vector<Interval> *interval_v, const BlockTensor &block
bool bfound = false;
auto fit_func = g_pBranching[m_branching_strategy_];
pair<size_t, size_t> fit_ret;
size_t gap;
Interval a;
size_t block_size;
@@ -91,18 +89,23 @@ bool FootPrint::findFirst(vector<Interval> *interval_v, const BlockTensor &block
Interval top(m_offset_, m_offset_);
a.lb() = m_offset_;
auto update_fit = [block_size, &fit_func](Interval a, bool &bfound, pair<size_t, size_t> &fit_ret) {
size_t gap;
gap = a.ub() - a.lb() - block_size;
auto fit_update = pair<size_t, size_t>(a.lb(), gap);
if (!bfound) {
bfound = true;
fit_ret = fit_update;
} else if (fit_func(fit_update, fit_ret)) {
fit_ret = fit_update;
}
};
for (auto &b : *interval_v) {
if (top.ub() < b.lb()) {
a.ub() = b.lb();
if (a.contains(block_size) && a.lb() + block.m_size_ <= algorithm[m_algorithm_](this)) {
gap = a.ub() - a.lb() - block_size;
auto fit_update = pair<size_t, size_t>(a.lb(), gap);
if (!bfound) {
bfound = true;
fit_ret = fit_update;
} else if (fit_func(fit_update, fit_ret)) {
fit_ret = fit_update;
}
update_fit(a, bfound, fit_ret);
}
top = b;
} else if (top.ub() < b.ub()) {
@@ -113,14 +116,7 @@ bool FootPrint::findFirst(vector<Interval> *interval_v, const BlockTensor &block
a.ub() = algorithm[m_algorithm_](this);
if (a.contains(block_size) && a.lb() + block.m_size_ <= algorithm[m_algorithm_](this)) {
gap = a.ub() - a.lb() - block_size;
auto fit_update = pair<size_t, size_t>(a.lb(), gap);
if (!bfound) {
bfound = true;
fit_ret = fit_update;
} else if (fit_func(fit_update, fit_ret)) {
fit_ret = fit_update;
}
update_fit(a, bfound, fit_ret);
}
if (bfound) {
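The findFirst rewrite is the one non-mechanical change in this file: two identical "is this gap a better fit" blocks are hoisted into a shared update_fit lambda, so the fitting policy lives in one place. The shape of the refactor, reduced to a standalone best-fit sketch with hypothetical numbers:

#include <cstddef>
#include <cstdio>
#include <utility>
#include <vector>

int main() {
  // Candidate (offset, available size) slots.
  std::vector<std::pair<size_t, size_t>> slots = {{0, 96}, {128, 32}, {512, 64}};
  const size_t block_size = 30;

  bool bfound = false;
  std::pair<size_t, size_t> fit_ret;

  // One lambda replaces the two duplicated update blocks: it keeps the
  // candidate whose leftover gap is smallest.
  auto update_fit = [block_size, &bfound, &fit_ret](const std::pair<size_t, size_t> &slot) {
    std::pair<size_t, size_t> fit_update(slot.first, slot.second - block_size);
    if (!bfound || fit_update.second < fit_ret.second) {
      bfound = true;
      fit_ret = fit_update;
    }
  };

  for (const auto &slot : slots) {
    if (slot.second >= block_size) {
      update_fit(slot);  // every call site now shares one code path
    }
  }

  if (bfound) {
    std::printf("best offset %zu, gap %zu\n", fit_ret.first, fit_ret.second);
  }
  return 0;
}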

View File

@@ -43,24 +43,26 @@ namespace mindspore {
namespace somas {
class Interval {
public:
Interval() { m_a_ = m_b_ = 0; }
explicit Interval(const SomasSolverTensorDescPtr &t) {
m_a_ = t->offset_;
m_b_ = m_a_ + t->size_;
}
Interval(const size_t &a, const size_t &b) {
m_a_ = a;
m_b_ = b;
Interval() : m_a_(0), m_b_(0) {}
explicit Interval(const SomasSolverTensorDescPtr &t) : m_a_(t->offset_), m_b_(t->offset_ + t->size_) {}
Interval(const size_t &a, const size_t &b) : m_a_(a), m_b_(b) {}
Interval(const Interval &in) {
if (this == &in) {
return;
}
m_a_ = in.m_a_;
m_b_ = in.m_b_;
}
~Interval() = default;
bool intersect(const Interval &i) const { return (in(i.m_a_) || in(i.m_b_)); }
bool in(const size_t &a) const { return ((a > m_a_) && (a < m_b_)); }
Interval intersection(const Interval &i) {
if (m_a_ < i.m_a_)
if (m_a_ < i.m_a_) {
return Interval(m_a_, i.m_b_);
else
} else {
return Interval(i.m_a_, m_b_);
}
}
void merge(const Interval &i) {
m_a_ = std::min(m_a_, i.m_a_);
@@ -102,6 +104,17 @@ class BlockTensor {
offsets_(),
m_size_(0) {}
~BlockTensor() = default;
BlockTensor(const BlockTensor &bt) {
if (this == &bt) {
return;
}
m_bre_allocate_ = bt.m_bre_allocate_;
m_current_sol_ = 0;
m_start_tensor_ = bt.m_start_tensor_;
offsets_candidates_ = bt.offsets_candidates_;
offsets_ = bt.offsets_;
m_size_ = bt.m_size_;
}
BlockTensor &operator=(const BlockTensor &bt) {
if (this == &bt) {
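The Interval constructors above move from assignment-in-body to member initializer lists: members are constructed once with their final values instead of being default-initialized and then overwritten, and the style also works for const and reference members, where assignment would not compile. The before/after in miniature:

#include <cstddef>

class Interval {
 public:
  // Before: Interval() { m_a_ = m_b_ = 0; }  (default-init, then assign)
  // After: direct initialization, in declaration order (m_a_, then m_b_).
  Interval() : m_a_(0), m_b_(0) {}
  Interval(size_t a, size_t b) : m_a_(a), m_b_(b) {}

  size_t lb() const { return m_a_; }
  size_t ub() const { return m_b_; }

 private:
  size_t m_a_;
  size_t m_b_;
};

int main() {
  Interval i(1, 4);
  return i.ub() - i.lb() == 3 ? 0 : 1;
}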

View File

@@ -146,7 +146,9 @@ bool SomasSolverCore::Verify(const size_t &upperbound) {
for (auto t2_ : tensors_) {
t1 = t1_.second;
t2 = t2_.second;
if (t1->index_ == t2->index_) continue;
if (t1->index_ == t2->index_) {
continue;
}
bool blifelong = (t1->lifelong_ || t2->lifelong_) && (t1->index_ != t2->index_);
if (t2->right_ == t1) { // continuous constraint
// t1 must be continuous to t2
@@ -187,13 +189,17 @@ void SomasSolverCore::BuildBlocks() {
uint64_t tensors_block_count = 0;
for (auto tensor : tensors_) {
SomasSolverTensorDescPtr pTensor = tensor.second;
if (pTensor->blocked_) continue;
if (pTensor->blocked_) {
continue;
}
if (pTensor->lifelong_) {
lifelong_memory_ += pTensor->size_;
continue;
}
// move to the left
while (pTensor->left_) pTensor = pTensor->left_;
while (pTensor->left_) {
pTensor = pTensor->left_;
}
// set start tensor
BlockTensor bTensor;
@@ -213,8 +219,9 @@
this->block_tensors_.emplace_back(bTensor);
}
if (tensors_block_count != tensors_.size())
if (tensors_block_count != tensors_.size()) {
MS_LOG(INFO) << static_cast<int>(tensors_.size() - tensors_block_count) << " lifelong tensors found";
}
}
void SomasSolverCore::Clean() {
@@ -283,7 +290,9 @@ void SomasSolverCore::SortTensors() { // need to sort the tensors for Fast Heur
void SomasSolverCore::RestoreSolution(uint32_t sol_id) {
for (auto block : block_tensors_) {
if (block.offsets_.count(sol_id) == 0) MS_ASSERT(0);
if (block.offsets_.count(sol_id) == 0) {
MS_ASSERT(0);
}
size_t bestOffset = block.offsets_[sol_id];
size_t offset = bestOffset;
SomasSolverTensorDescPtr pTensor = block.m_start_tensor_;
@@ -329,12 +338,12 @@ void SomasSolverCore::AppendLifelongTensors() {
MS_LOG(DEBUG) << "Appending lifelong tensors to solution";
size_t offset = upperbound_;
std::map<size_t, SomasSolverTensorDescPtr> lifelongTensors;
for (auto &t : tensors_) {
for (const auto &t : tensors_) {
if (t.second->lifelong_) {
(void)lifelongTensors.emplace(t.first, t.second);
}
}
for (auto &t : lifelongTensors) {
for (const auto &t : lifelongTensors) {
auto &pTensor = t.second;
pTensor->offset_ = offset;
offset += pTensor->size_;
@@ -352,18 +361,18 @@ size_t SomasSolverCore::FindSolutions() {
pFootprint->setAlgorithm(static_cast<uint32_t>(algorithm_));
Search(pFootprint);
AppendLifelongTensors();
Destroy(pFootprint);
Destroy(&pFootprint);
return upperbound_;
}
void SomasSolverCore::Destroy(std::shared_ptr<FootPrint> &pFootprint) {
while (pFootprint != nullptr) {
if (pFootprint->Next() != nullptr) {
std::shared_ptr<FootPrint> &p = pFootprint;
pFootprint = pFootprint->Next();
void SomasSolverCore::Destroy(std::shared_ptr<FootPrint> *pFootprint) const {
while ((*pFootprint) != nullptr) {
if ((*pFootprint)->Next() != nullptr) {
std::shared_ptr<FootPrint> &p = (*pFootprint);
(*pFootprint) = (*pFootprint)->Next();
p = nullptr;
} else {
pFootprint = nullptr;
(*pFootprint) = nullptr;
}
}
}
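Destroy now takes std::shared_ptr<FootPrint> * rather than a non-const reference (mutable output parameters as pointers is the convention this commit enforces), and it remains iterative: it unlinks the footprint chain one node per loop turn, so a long chain of shared_ptr links is not released through deep recursive destructor calls. A self-contained sketch of that teardown pattern, with a hypothetical Node standing in for FootPrint:

#include <memory>

struct Node {
  std::shared_ptr<Node> next;  // singly linked chain, like FootPrint::Next()
};

// Unlink front-to-back. Each iteration frees exactly one node, so teardown
// costs n loop turns instead of n nested destructor frames (which can
// overflow the stack for long chains).
void Destroy(std::shared_ptr<Node> *head) {
  while (*head != nullptr) {
    std::shared_ptr<Node> current = *head;
    *head = current->next;
    current->next = nullptr;  // sever the link before `current` goes away
  }
}

int main() {
  auto head = std::make_shared<Node>();
  std::shared_ptr<Node> tail = head;
  for (int i = 0; i < 100000; ++i) {
    tail->next = std::make_shared<Node>();
    tail = tail->next;
  }
  tail = nullptr;  // drop the extra reference to the last node
  Destroy(&head);  // safe even for a 100000-node chain
  return 0;
}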

View File

@@ -50,7 +50,7 @@ class SomasSolverCore {
Status MemoryAllocationSolver();
Status Verify();
bool Verify(const size_t &);
bool Verify(const size_t &upperbound);
void VerifySolution(const bool verify) { verify_ = verify; }
void SortTensors();
void BuildBlocks();
@@ -84,7 +84,7 @@ class SomasSolverCore {
size_t FindSolutions();
size_t Search(const std::shared_ptr<FootPrint> &pFootprint);
void AppendLifelongTensors();
void Destroy(std::shared_ptr<FootPrint> &);
void Destroy(std::shared_ptr<FootPrint> *pFootprint) const;
};
} // namespace somas
} // namespace mindspore

View File

@@ -28,7 +28,7 @@
namespace mindspore {
namespace somas {
constexpr auto kSolNumThresholdMultiThread = 8;
Status SomasSolverPre::CheckTensors(const TensorsDescMap *pTensors, uint32_t index1, uint32_t index2) {
Status SomasSolverPre::CheckTensors(const TensorsDescMap *pTensors, uint32_t index1, uint32_t index2) const {
auto tensors = *pTensors;
if (tensors[index1] == nullptr) {
MS_LOG(WARNING) << "NULL tensor received in continuous constraint (tensor index " << index1 << ")";
@@ -39,12 +39,14 @@ Status SomasSolverPre::CheckTensors(const TensorsDescMap *pTensors, uint32_t ind
return FAILED;
}
if (tensors[index1]->right_)
if (tensors[index1]->right_) {
MS_LOG(WARNING) << "Warning:tensor " << index1
<< " already has a right tensor (id: " << tensors[index1]->right_->index_;
if (tensors[index2]->left_)
}
if (tensors[index2]->left_) {
MS_LOG(WARNING) << "Warning:tensor " << index2
<< " already has a left tensor (id: " << tensors[index2]->left_->index_;
}
return SUCCESS;
}
Status SomasSolverPre::AddContiguousInfoInMap(const vector<vector<size_t>> &continuous_v, TensorsDescMap *pTensors) {
@@ -83,7 +85,7 @@ Status SomasSolverPre::AddContiguousInfoInMultiMaps(const vector<vector<size_t>>
}
return SUCCESS;
}
vector<TensorsDescMap> SomasSolverPre::CreateTensorsMaps(const TensorsDescMap &tensors, size_t total_sol) {
vector<TensorsDescMap> SomasSolverPre::CreateTensorsMaps(const TensorsDescMap &tensors, size_t total_sol) const {
vector<TensorsDescMap> vecTensorsMap(total_sol);
vecTensorsMap[0] = tensors;
for (auto &pairT : tensors) {
@@ -209,7 +211,7 @@ void SomasSolverPre::Log(const session::KernelGraph *graph, const TensorsDescMap
}
void SomasSolverPre::TensorRelationLog(const std::vector<DynamicBitSet> *pConstraints,
const session::KernelGraph *graph) {
const session::KernelGraph *graph) const {
MS_LOG(INFO) << "SomasSolver::Log Writing somas_tensor_relation.ir..";
auto context_ptr = MsContext::GetInstance();
MS_EXCEPTION_IF_NULL(context_ptr);
@@ -229,7 +231,7 @@ }
}
void SomasSolverPre::SolverInputLog(const session::KernelGraph *graph, const TensorsDescMap &tensors,
const vector<vector<size_t>> &continuous_v) {
const vector<vector<size_t>> &continuous_v) const {
MS_LOG(INFO) << "SomasSolver::Log Writing somas_solver_input..";
auto context_ptr = MsContext::GetInstance();
MS_EXCEPTION_IF_NULL(context_ptr);
@@ -266,12 +268,13 @@ void SomasSolverPre::SolverOutputLog(const session::KernelGraph *graph, const Te
for (auto &t : tensors) {
SomasSolverTensorDescPtr tensor = t.second;
int continuous = 0;
if (tensor->left_ == nullptr && tensor->right_ != nullptr)
if (tensor->left_ == nullptr && tensor->right_ != nullptr) {
continuous = contiguous_left;
else if (tensor->left_ != nullptr && tensor->right_ != nullptr)
} else if (tensor->left_ != nullptr && tensor->right_ != nullptr) {
continuous = contiguous_mid;
else if (tensor->left_ != nullptr && tensor->right_ == nullptr)
} else if (tensor->left_ != nullptr && tensor->right_ == nullptr) {
continuous = contiguous_right;
}
const size_t alignment = 512;
bool size_aligned = tensor->size_ % alignment == 0;
bool offset_aligned = tensor->offset_ % alignment == 0;

View File

@@ -71,7 +71,7 @@ class DynamicBitSet {
inline size_t GetIndex(size_t index) const { return index / bit_width_; }
inline uint64_t GetBitMask(size_t index) const {
return (((uint64_t)0x1) << ((bit_width_ - 1) - (index % bit_width_)));
return ((static_cast<uint64_t>(0x1)) << ((bit_width_ - 1) - (index % bit_width_)));
}
inline void Reset(uint64_t val) {
@@ -84,10 +84,7 @@
public:
size_t bit_size_;
std::vector<uint64_t> bit_;
explicit DynamicBitSet(size_t count) {
bit_size_ = (count + bit_width_ - 1) / bit_width_;
Reset(0x0);
}
explicit DynamicBitSet(size_t count) : bit_size_((count + bit_width_ - 1) / bit_width_) { Reset(0x0); }
~DynamicBitSet() = default;
@@ -104,17 +101,17 @@
size_t CountOnesNum() const {
size_t ret = 0;
static char ones_num_in_hex[] = "\0\1\1\2\1\2\2\3\1\2\2\3\2\3\3\4";
static unsigned char ones_num_in_hex[] = "\0\1\1\2\1\2\2\3\1\2\2\3\2\3\3\4";
for (size_t i = 0; i < bit_size_; i++) {
auto value = bit_[i];
if (value == 0) {
continue;
}
char *char_value = reinterpret_cast<char *>(&value);
auto *char_value = reinterpret_cast<unsigned char *>(&value);
for (size_t j = 0; j < bit_width_ / CHAR_BIT; j++) {
ret += ones_num_in_hex[char_value[j] & 0xF];
ret += ones_num_in_hex[static_cast<int>(char_value[j] & 0xF)];
char_value[j] >>= 4;
ret += ones_num_in_hex[char_value[j] & 0xF];
ret += ones_num_in_hex[static_cast<int>(char_value[j] & 0xF)];
}
}
return ret;
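The bit-count hunk switches the nibble table and the reinterpreted byte pointer from char to unsigned char. Whether plain char is signed is implementation-defined, and on a signed-char platform char_value[j] >>= 4 on a byte with the high bit set shifts in sign bits, producing an out-of-range table index. A standalone sketch of the nibble-table popcount with well-defined unsigned arithmetic:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Count set bits using a 16-entry lookup table, one nibble at a time.
size_t CountOnes(uint64_t value) {
  static const unsigned char ones_num_in_hex[16] = {0, 1, 1, 2, 1, 2, 2, 3,
                                                    1, 2, 2, 3, 2, 3, 3, 4};
  unsigned char bytes[sizeof(value)];
  std::memcpy(bytes, &value, sizeof(value));  // portable byte view of the word
  size_t ret = 0;
  for (size_t j = 0; j < sizeof(value); ++j) {
    ret += ones_num_in_hex[bytes[j] & 0xF];         // low nibble
    ret += ones_num_in_hex[(bytes[j] >> 4) & 0xF];  // high nibble, zero-filled shift
  }
  return ret;
}

int main() {
  std::printf("%zu\n", CountOnes(0xF0F0F0F0F0F0F0F0ULL));  // prints 32
  return 0;
}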
@@ -149,12 +146,14 @@ struct SomasSolverTensorDesc {
SomasSolverTensorDesc() = default;
SomasSolverTensorDesc(size_t index, size_t size, size_t offset, bool blifelong)
: index_(index), size_(size), offset_(offset), lifelong_(blifelong) {
constraints_ = 0;
right_ = nullptr;
left_ = nullptr;
blocked_ = false;
}
: index_(index),
size_(size),
offset_(offset),
lifelong_(blifelong),
constraints_(0),
right_(nullptr),
left_(nullptr),
blocked_(false) {}
void Update(size_t index, size_t size, size_t offset, bool blifelong, size_t constraints) {
index_ = index;
@@ -185,7 +184,7 @@ class SomasSolverPre {
size_t GetMaxOffset() const { return max_offset_; }
Status Solving(const session::KernelGraph *graph, TensorsDescMap *tensors,
Status Solving(const session::KernelGraph *graph, TensorsDescMap *ptensors,
const std::vector<DynamicBitSet> *pConstraints, const vector<vector<size_t>> &continuous_v,
bool bVerifySolution, // true -> Check continuous and non overlapping constraints solution
bool ball = true, // true -> run full set of heuristics, false -> run single heuristic specified
@@ -193,9 +192,9 @@
AlgorithmType algorithm = kManyObjects);
void Log(const session::KernelGraph *graph, const TensorsDescMap &tensors,
const std::vector<DynamicBitSet> *pConstraints_v, const vector<vector<size_t>> &continuous_v);
const std::vector<DynamicBitSet> *pConstraints, const vector<vector<size_t>> &continuous_v);
Status CheckTensors(const TensorsDescMap *pTensors, uint32_t index1, uint32_t index2);
Status CheckTensors(const TensorsDescMap *pTensors, uint32_t index1, uint32_t index2) const;
Status AddContiguousInfoInMap(const vector<vector<size_t>> &continuous_v, TensorsDescMap *pTensors);
Status AddContiguousInfoInMultiMaps(const vector<vector<size_t>> &continuous_v, vector<TensorsDescMap> *vecTensorsMap,
const TensorsDescMap *pTensors);
@@ -203,10 +202,10 @@
private:
size_t max_offset_;
void SolverInputLog(const session::KernelGraph *graph, const TensorsDescMap &tensors,
const vector<vector<size_t>> &continuous_v);
const vector<vector<size_t>> &continuous_v) const;
void SolverOutputLog(const session::KernelGraph *graph, const TensorsDescMap &tensors) const;
vector<TensorsDescMap> CreateTensorsMaps(const TensorsDescMap &tensors, size_t total_sol);
void TensorRelationLog(const std::vector<DynamicBitSet> *pConstraints, const session::KernelGraph *graph);
vector<TensorsDescMap> CreateTensorsMaps(const TensorsDescMap &tensors, size_t total_sol) const;
void TensorRelationLog(const std::vector<DynamicBitSet> *pConstraints, const session::KernelGraph *graph) const;
};
using SomasSolverPrePtr = std::shared_ptr<SomasSolverPre>;
} // namespace somas
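A recurring header change in this commit appends const to member functions that never modify the object (CheckTensors, CreateTensorsMaps, TensorRelationLog, SolverInputLog, and others). That makes them callable through const references and turns the "does not mutate" guarantee into something the compiler checks. A tiny illustration:

class Counter {
 public:
  // const member function: callable on const objects; any accidental
  // write to a member inside the body is a compile error.
  int Value() const { return count_; }
  void Bump() { ++count_; }  // non-const: mutates state

 private:
  int count_ = 0;
};

int Report(const Counter &counter) {
  // counter.Bump();  // would not compile: Bump() is not const
  return counter.Value();  // only const members are callable here
}

int main() {
  Counter counter;
  counter.Bump();
  return Report(counter) == 1 ? 0 : 1;
}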

View File

@@ -21,8 +21,12 @@ namespace somas {
SomasTensor::SomasTensor(size_t id, size_t source_node_id, size_t source_stream_id, size_t real_size,
LifeLongType lifelong_value)
: lifelong_value_(lifelong_value),
between_streams_(false),
contiguous_(false),
type_(kUnknown),
offset_(0),
num_constraints_(0),
ref_overlap_(false),
id_(id),
source_node_id_(source_node_id),
source_stream_id_(source_stream_id),
@@ -30,13 +34,7 @@ SomasTensor::SomasTensor(size_t id, size_t source_node_id, size_t source_stream_
const size_t alignment = 512;
const size_t alignment_complement = 31;
aligned_size_ = (real_size > 0) ? ((real_size + alignment + alignment_complement) / alignment) * alignment : 0;
solver_tensor_desc_ = std::make_shared<SomasSolverTensorDesc>(id_, aligned_size_, offset_, false);
ref_overlap_ = false;
between_streams_ = false;
contiguous_ = false;
num_constraints_ = 0;
}
SomasSolverTensorDescPtr SomasTensor::GetSolverTensorDesc() {

View File

@@ -456,7 +456,7 @@ void DumpPrimalDebugInfos(const CNodePtr &node, const std::shared_ptr<SubGraphIR
auto primal_debug_infos = node->primal_debug_infos();
if (!primal_debug_infos.empty()) {
std::string lines;
for (auto &primal_debug_info : primal_debug_infos) {
for (const auto &primal_debug_info : primal_debug_infos) {
auto debug_info_str = trace::GetDebugInfo(primal_debug_info, " # ", kSourceLineTipDiscard);
if (!debug_info_str.empty()) {
lines += debug_info_str + "\n";

View File

@@ -234,7 +234,7 @@ std::string Graphviz::Shape(const AnfNodePtr &node) {
return "plaintext";
}
std::string Graphviz::Color(const AnfNodePtr &node) {
std::string Graphviz::Color(const AnfNodePtr &node) const {
if (node == nullptr) {
return "";
}

View File

@@ -43,17 +43,17 @@ class ProtoExporter {
private:
void InitModelInfo();
void GetOpNodeTypeAndAttrs(const FuncGraphPtr &func_graph, const CNodePtr &cnode, irpb::NodeProto *node_proto);
std::string GetOpNodeInputId(const FuncGraphPtr &func_graph, const AnfNodePtr &node,
void GetOpNodeTypeAndAttrs(const FuncGraphPtr & /* func_graph */, const CNodePtr &cnode, irpb::NodeProto *node_proto);
std::string GetOpNodeInputId(const FuncGraphPtr & /* func_graph */, const AnfNodePtr &node,
const std::map<AnfNodePtr, size_t> &apply_map,
std::map<AnfNodePtr, size_t> *const_map_ptr) const;
void SetValueToProtoBasicTypes(const ValuePtr &attr_value, irpb::ValueProto *value_proto);
void SetValueToProto(const ValuePtr &attr_value, irpb::ValueProto *value_proto);
void SetScalarToProto(const ScalarPtr &val, irpb::ValueProto *value_proto);
void SetValueToProtoBasicTypes(const ValuePtr &val, irpb::ValueProto *const value_proto);
void SetValueToProto(const ValuePtr &val, irpb::ValueProto *value_proto);
void SetScalarToProto(const ScalarPtr &val, irpb::ValueProto *value_proto) const;
void SetSequenceToProto(const ValueSequencePtr &val, irpb::ValueProto *value_proto);
void SetDictionaryToProto(const ValueDictionaryPtr &val, irpb::ValueProto *value_proto);
void SetNodeOutputType(const AnfNodePtr &node, irpb::TypeProto *type_proto);
void SetNodeOutputType(const TypePtr &node, const BaseShapePtr &shape, irpb::TypeProto *type_proto);
void SetNodeOutputType(const TypePtr &type, const BaseShapePtr &shape, irpb::TypeProto *type_proto);
void ExportParameters(const FuncGraphPtr &func_graph, irpb::GraphProto *graph_proto);
void ExportCNodes(const FuncGraphPtr &func_graph, irpb::GraphProto *graph_proto,
@@ -70,46 +70,31 @@ class ProtoExporter {
irpb::ModelProto model_;
};
static std::map<TypeId, irpb::DataType> number_data_type_map = {{kNumberTypeBool, irpb::DT_BOOL},
{kNumberTypeInt8, irpb::DT_INT8},
{kNumberTypeInt16, irpb::DT_INT16},
{kNumberTypeInt32, irpb::DT_INT32},
{kNumberTypeInt64, irpb::DT_INT64},
{kNumberTypeUInt8, irpb::DT_UINT8},
{kNumberTypeUInt16, irpb::DT_UINT16},
{kNumberTypeUInt32, irpb::DT_UINT32},
{kNumberTypeUInt64, irpb::DT_UINT64},
{kNumberTypeFloat16, irpb::DT_FLOAT16},
{kNumberTypeFloat32, irpb::DT_FLOAT32},
{kNumberTypeFloat64, irpb::DT_FLOAT64},
{kNumberTypeInt, irpb::DT_BASE_INT},
{kNumberTypeUInt, irpb::DT_BASE_UINT},
{kNumberTypeFloat, irpb::DT_BASE_FLOAT},
{kNumberTypeComplex64, irpb::DT_COMPLEX64},
{kNumberTypeComplex128, irpb::DT_COMPLEX128},
{kObjectTypeString, irpb::DT_STRING}};
static irpb::DataType GetNumberDataType(const TypePtr &type) {
switch (type->type_id()) {
case kNumberTypeBool:
return irpb::DT_BOOL;
case kNumberTypeInt8:
return irpb::DT_INT8;
case kNumberTypeInt16:
return irpb::DT_INT16;
case kNumberTypeInt32:
return irpb::DT_INT32;
case kNumberTypeInt64:
return irpb::DT_INT64;
case kNumberTypeUInt8:
return irpb::DT_UINT8;
case kNumberTypeUInt16:
return irpb::DT_UINT16;
case kNumberTypeUInt32:
return irpb::DT_UINT32;
case kNumberTypeUInt64:
return irpb::DT_UINT64;
case kNumberTypeFloat16:
return irpb::DT_FLOAT16;
case kNumberTypeFloat32:
return irpb::DT_FLOAT32;
case kNumberTypeFloat64:
return irpb::DT_FLOAT64;
case kNumberTypeInt:
return irpb::DT_BASE_INT;
case kNumberTypeUInt:
return irpb::DT_BASE_UINT;
case kNumberTypeFloat:
return irpb::DT_BASE_FLOAT;
case kNumberTypeComplex64:
return irpb::DT_COMPLEX64;
case kNumberTypeComplex128:
return irpb::DT_COMPLEX128;
case kObjectTypeString:
return irpb::DT_STRING;
default:
MS_LOG(EXCEPTION) << "Unexpected type " << type->type_name();
auto iter = number_data_type_map.find(type->type_id());
if (iter != number_data_type_map.end()) {
return (*iter).second;
} else {
MS_LOG(EXCEPTION) << "Unexpected type " << type->type_name();
}
}
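Replacing the eighteen-case switch with a static lookup table keeps the TypeId-to-irpb::DataType mapping as data: adding a type becomes a one-line table entry rather than a new case, and the failure path stays in exactly one place. The pattern in miniature, with hypothetical stand-in enums:

#include <map>
#include <stdexcept>

enum class TypeId { kBool, kInt8, kFloat32 };          // stand-in for MindSpore's TypeId
enum class DataType { DT_BOOL, DT_INT8, DT_FLOAT32 };  // stand-in for irpb::DataType

static const std::map<TypeId, DataType> number_data_type_map = {
    {TypeId::kBool, DataType::DT_BOOL},
    {TypeId::kInt8, DataType::DT_INT8},
    {TypeId::kFloat32, DataType::DT_FLOAT32},
};

DataType GetNumberDataType(TypeId id) {
  auto iter = number_data_type_map.find(id);
  if (iter == number_data_type_map.end()) {
    // The real code raises through MS_LOG(EXCEPTION); a throw stands in here.
    throw std::out_of_range("Unexpected type");
  }
  return iter->second;
}

int main() {
  return GetNumberDataType(TypeId::kFloat32) == DataType::DT_FLOAT32 ? 0 : 1;
}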
@@ -258,7 +243,7 @@ void ProtoExporter::SetValueToProto(const ValuePtr &val, irpb::ValueProto *value
}
}
void ProtoExporter::SetScalarToProto(const ScalarPtr &val, irpb::ValueProto *value_proto) {
void ProtoExporter::SetScalarToProto(const ScalarPtr &val, irpb::ValueProto *value_proto) const {
if (val == nullptr || value_proto == nullptr) {
return;
}
@@ -345,7 +330,8 @@ void ProtoExporter::SetDictionaryToProto(const ValueDictionaryPtr &val, irpb::Va
}
}
void ProtoExporter::GetOpNodeTypeAndAttrs(const FuncGraphPtr &, const CNodePtr &cnode, irpb::NodeProto *node_proto) {
void ProtoExporter::GetOpNodeTypeAndAttrs(const FuncGraphPtr & /* func_graph */, const CNodePtr &cnode,
irpb::NodeProto *node_proto) {
const auto &inputs = cnode->inputs();
AnfNodePtr op_node = inputs[0];
@@ -380,7 +366,7 @@ void ProtoExporter::GetOpNodeTypeAndAttrs(const FuncGraphPtr &, const CNodePtr &
node_proto->set_scope(op_node->scope()->name());
}
std::string ProtoExporter::GetOpNodeInputId(const FuncGraphPtr &, const AnfNodePtr &node,
std::string ProtoExporter::GetOpNodeInputId(const FuncGraphPtr & /* func_graph */, const AnfNodePtr &node,
const std::map<AnfNodePtr, size_t> &apply_map,
std::map<AnfNodePtr, size_t> *const_map_ptr) const {
if (node == nullptr || const_map_ptr == nullptr) {

View File

@@ -141,7 +141,7 @@ std::string DuplexPipe::ReadWithStdin() {
return buf;
}
DuplexPipe &DuplexPipe::operator<<(const std::string &buf) {
const DuplexPipe &DuplexPipe::operator<<(const std::string &buf) const {
Write(buf);
return *this;
}
@@ -197,7 +197,7 @@ void DuplexPipe::SignalHandler::SigPipeHandler(int sig) {
}
}
void DuplexPipe::SignalHandler::SigChildHandler(int) {
void DuplexPipe::SignalHandler::SigChildHandler(int /* sig */) {
int status;
if (child_pid_ != nullptr) {
(void)waitpid(*child_pid_, &status, WNOHANG | WUNTRACED);
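Several signatures in this commit drop or comment out the names of parameters that a fixed signature demands but the body never reads (SingleObject(FootPrint *), SigChildHandler(int /* sig */)). The commented name keeps the meaning visible while telling the compiler the value is intentionally unused, which silences -Wunused-parameter. In miniature:

#include <cstdio>

// The int is required by the signal-handler signature; the value is unused.
void SigChildHandler(int /* sig */) { std::puts("child state changed"); }

int main() {
  SigChildHandler(17);  // e.g. SIGCHLD
  return 0;
}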

View File

@@ -33,7 +33,9 @@ void DuplexPipe::WriteWithStdout(const std::string &buf, bool flush) {
std::string DuplexPipe::ReadWithStdin() { DP_EXCEPTION << "Not support for Windows by now."; }
DuplexPipe &DuplexPipe::operator<<(const std::string &buf) { DP_EXCEPTION << "Not support for Windows by now."; }
const DuplexPipe &DuplexPipe::operator<<(const std::string &buf) const {
DP_EXCEPTION << "Not support for Windows by now.";
}
DuplexPipe &DuplexPipe::operator>>(std::string &buf) { DP_EXCEPTION << "Not support for Windows by now."; }

View File

@@ -231,7 +231,7 @@ std::vector<char> AscendDeviceInfo::GetDynamicBatchSizeChar() const {
return StringToChar(ref);
}
void AscendDeviceInfo::SetDynamicImageSize(const std::vector<char> &) { return; }
void AscendDeviceInfo::SetDynamicImageSize(const std::vector<char> & /* dynamic_image_size */) { return; }
std::vector<char> AscendDeviceInfo::GetDynamicImageSizeChar() const { return std::vector<char>(); }

View File

@@ -21,6 +21,7 @@
#include <memory>
#include <string>
#include <fstream>
#include "include/api/status.h"
#include "utils/file_utils.h"
namespace mindspore {

View File

@@ -21,6 +21,7 @@
#include <vector>
#include <memory>
#include <utility>
#include "include/api/context.h"
#include "include/common/utils/utils.h"
namespace mindspore {

View File

@@ -21,8 +21,7 @@ namespace mindspore {
std::shared_ptr<AclEnvGuard> AclEnvGuard::global_acl_env_ = nullptr;
std::mutex AclEnvGuard::global_acl_env_mutex_;
AclEnvGuard::AclEnvGuard() {
errno_ = aclInit(nullptr);
AclEnvGuard::AclEnvGuard() : errno_(aclInit(nullptr)) {
if (errno_ != ACL_ERROR_NONE && errno_ != ACL_ERROR_REPEAT_INITIALIZE) {
MS_LOG(ERROR) << "Execute aclInit Failed";
return;

View File

@@ -20,7 +20,6 @@
#include <string>
#include <vector>
#include <memory>
#include <utility>
#include "include/api/graph.h"
#include "cxx_api/graph/acl/model_process.h"
#include "cxx_api/graph/acl/acl_env_guard.h"

View File

@@ -82,7 +82,7 @@ static std::string ShapeToString(const std::vector<int64_t> &shape) {
}
Status ModelProcess::ConstructTensors(const std::vector<AclTensorInfo> &acl_tensor_list,
std::vector<MSTensor> *tensor_list) {
std::vector<MSTensor> *tensor_list) const {
MS_EXCEPTION_IF_NULL(tensor_list);
std::vector<std::string> names;
std::vector<std::vector<int64_t>> shapes;
@@ -328,10 +328,10 @@ Status ModelProcess::UnLoad() {
return kSuccess;
}
size_t ModelProcess::GetDynamicDims(const std::vector<AclTensorInfo> &inputs) {
size_t ModelProcess::GetDynamicDims(const std::vector<AclTensorInfo> &inputs) const {
size_t max_num = 0;
for (auto input : inputs) {
size_t cur_num = std::count(input.dims.begin(), input.dims.end(), -1);
size_t cur_num = LongToSize(std::count(input.dims.begin(), input.dims.end(), -1));
if (cur_num > max_num) {
max_num = cur_num;
}
@@ -345,7 +345,7 @@ Status ModelProcess::SetBatchSize(const std::vector<MSTensor> &inputs) {
for (size_t i = 0; i < inputs.size(); i++) {
input_infos_[i].buffer_size = inputs[i].DataSize();
}
auto *p = reinterpret_cast<const float *>(inputs[inputs.size() - 1].Data().get());
auto *p = static_cast<const float *>(inputs[inputs.size() - 1].Data().get());
MS_EXCEPTION_IF_NULL(p);
size_t dynamicBatchSize = FloatToSize(p[0]);
ret = aclmdlGetInputIndexByName(model_desc_, ACL_DYNAMIC_TENSOR_NAME, &index);
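Two conversions become explicit here: std::count returns a signed difference_type, so storing the result in a size_t now goes through LongToSize, and the buffer pointer is obtained with static_cast because converting const void * to const float * is exactly what static_cast is for (reinterpret_cast compiled, but it claimed a more dangerous operation than was happening). A hedged sketch of the count conversion, assuming a checked helper in the spirit of MindSpore's LongToSize:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <stdexcept>
#include <vector>

// Assumption: checked int64_t -> size_t conversion, mirroring LongToSize.
inline size_t LongToSize(int64_t i) {
  if (i < 0) {
    throw std::out_of_range("negative count cannot become size_t");
  }
  return static_cast<size_t>(i);
}

int main() {
  std::vector<int64_t> dims = {1, -1, 224, -1};  // -1 marks a dynamic dimension
  // std::count yields a signed ptrdiff_t; the conversion is now explicit.
  size_t dynamic_dims = LongToSize(std::count(dims.begin(), dims.end(), -1));
  return dynamic_dims == 2 ? 0 : 1;
}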

View File

@@ -62,7 +62,7 @@ class ModelProcess {
private:
Status CreateDataBuffer(void **data_mem_buffer, size_t buffer_size, aclmdlDataset *dataset) const;
Status CheckAndInitInput(const std::vector<MSTensor> &inputs);
Status ConstructTensors(const std::vector<AclTensorInfo> &acl_tensor_list, std::vector<MSTensor> *tensor_list);
Status ConstructTensors(const std::vector<AclTensorInfo> &acl_tensor_list, std::vector<MSTensor> *tensor_list) const;
Status BuildOutputs(std::vector<MSTensor> *outputs);
Status SetBatchSize(const std::vector<MSTensor> &inputs);
Status InitInputsBuffer();
@@ -84,7 +84,7 @@ class ModelProcess {
std::vector<AclTensorInfo> output_infos_;
std::vector<MSTensor> input_tensors_;
std::vector<MSTensor> output_tensors_;
size_t GetDynamicDims(const std::vector<AclTensorInfo> &);
size_t GetDynamicDims(const std::vector<AclTensorInfo> &inputs) const;
};
} // namespace mindspore

View File

@@ -322,9 +322,8 @@ Status AscendGraphImpl::Run(const std::vector<MSTensor> &inputs, std::vector<MST
return kSuccess;
}
AscendGraphImpl::MsEnvGuard::MsEnvGuard(uint32_t device_id) {
AscendGraphImpl::MsEnvGuard::MsEnvGuard(uint32_t device_id) : device_id_(device_id) {
MS_LOG(INFO) << "Start to init device " << device_id;
device_id_ = device_id;
RegAllOp();
auto ms_context = MsContext::GetInstance();
if (ms_context == nullptr) {
@@ -370,29 +369,34 @@ AscendGraphImpl::MsEnvGuard::MsEnvGuard(uint32_t device_id) {
AscendGraphImpl::MsEnvGuard::~MsEnvGuard() {
MS_LOG(INFO) << "Start finalize device " << device_id_;
session::ExecutorManager::Instance().Clear();
device::KernelRuntimeManager::Instance().ClearRuntimeResource();
try {
session::ExecutorManager::Instance().Clear();
device::KernelRuntimeManager::Instance().ClearRuntimeResource();
auto ms_context = MsContext::GetInstance();
if (ms_context == nullptr) {
MS_LOG(ERROR) << "Get Context failed!";
return;
}
if (ms_context->get_param<bool>(MS_CTX_ENABLE_HCCL)) {
PythonEnvGuard guard;
if (!context::CloseTsd(ms_context)) {
MS_LOG(ERROR) << "CloseTsd failed!";
auto ms_context = MsContext::GetInstance();
if (ms_context == nullptr) {
MS_LOG(ERROR) << "Get Context failed!";
return;
}
} else {
auto ret = rtDeviceReset(static_cast<int32_t>(device_id_));
if (ret != RT_ERROR_NONE) {
MS_LOG(ERROR) << "Device " << device_id_ << " call rtDeviceReset failed, ret[" << static_cast<int>(ret) << "]";
return;
}
}
if (ms_context->get_param<bool>(MS_CTX_ENABLE_HCCL)) {
PythonEnvGuard guard;
if (!context::CloseTsd(ms_context)) {
MS_LOG(ERROR) << "CloseTsd failed!";
return;
}
} else {
auto ret = rtDeviceReset(static_cast<int32_t>(device_id_));
if (ret != RT_ERROR_NONE) {
MS_LOG(ERROR) << "Device " << device_id_ << " call rtDeviceReset failed, ret[" << static_cast<int>(ret) << "]";
return;
}
}
} catch (const std::exception &e) {
MS_LOG(ERROR) << "AscendGraphImpl MsEnvGuard destructor run failed, error message : " << e.what();
} catch (...) {
MS_LOG(ERROR) << "AscendGraphImpl MsEnvGuard destructor run failed, unknown error occurred.";
}
MS_LOG(INFO) << "End finalize device " << device_id_;
}
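The MsEnvGuard destructor body is now wrapped in try/catch. Destructors are implicitly noexcept since C++11, so an exception escaping one calls std::terminate; teardown code that can throw has to swallow the error and log it, which is the shape the hunk above introduces. A minimal sketch:

#include <cstdio>
#include <exception>
#include <stdexcept>

class EnvGuard {
 public:
  ~EnvGuard() {
    // A throw escaping a destructor terminates the program, so catch
    // everything, report, and return normally.
    try {
      ReleaseDevice();
    } catch (const std::exception &e) {
      std::fprintf(stderr, "EnvGuard teardown failed: %s\n", e.what());
    } catch (...) {
      std::fprintf(stderr, "EnvGuard teardown failed: unknown error\n");
    }
  }

 private:
  static void ReleaseDevice() {  // hypothetical cleanup that may throw
    throw std::runtime_error("device reset failed");
  }
};

int main() {
  EnvGuard guard;  // destructor logs the failure instead of aborting
  return 0;
}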
@@ -431,10 +435,7 @@ bool AscendGraphImpl::CheckDeviceSupport(mindspore::DeviceType device_type) {
std::map<uint32_t, std::weak_ptr<AscendGraphImpl::MsEnvGuard>> AscendGraphImpl::MsEnvGuard::global_ms_env_;
std::mutex AscendGraphImpl::MsEnvGuard::global_ms_env_mutex_;
PythonEnvGuard::PythonEnvGuard() {
origin_init_status_ = PythonIsInited();
InitPython();
}
PythonEnvGuard::PythonEnvGuard() : origin_init_status_(PythonIsInited()) { InitPython(); }
PythonEnvGuard::~PythonEnvGuard() {
// finalize when init by this

View File

@@ -20,7 +20,6 @@
#include <string>
#include <vector>
#include <memory>
#include <utility>
#include "include/api/status.h"
#include "include/api/graph.h"
#include "cxx_api/graph/graph_impl.h"
@@ -48,7 +47,7 @@ class AscendGraphImpl : public GraphCell::GraphImpl {
Status CompileGraph(const std::shared_ptr<FuncGraph> &funcGraphPtr);
Status CheckModelInputs(const std::vector<tensor::TensorPtr> &inputs) const;
std::vector<tensor::TensorPtr> RunGraph(const std::vector<tensor::TensorPtr> &inputs);
Status ExecuteModel(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs);
Status ExecuteModel(const std::vector<MSTensor> &request, std::vector<MSTensor> *reply);
std::shared_ptr<session::SessionBasic> session_impl_;
uint32_t graph_id_;

View File

@@ -49,7 +49,8 @@ class COMMON_EXPORT Common {
static std::string AddId(const std::string &filename, const std::string &suffix);
static bool SaveStringToFile(const std::string filename, const std::string string_info);
static bool FileExists(const std::string &filepath);
static bool CommonFuncForConfigPath(const std::string &default_path, const std::string &env_path, std::string *value);
static bool CommonFuncForConfigPath(const std::string &default_path, const std::string &env_path,
std::string *const value);
static std::string GetCompilerCachePath();
static std::string GetUserDefineCachePath();
static bool GetDebugTerminate();

View File

@@ -39,7 +39,7 @@ class Graphviz {
virtual void End() {}
virtual std::string Shape(const AnfNodePtr &node);
std::string Color(const AnfNodePtr &node);
std::string Color(const AnfNodePtr &node) const;
std::ostringstream &buffer() { return buffer_; }
std::ostringstream buffer_;
@@ -56,7 +56,7 @@ class BaseDigraph : public Graphviz {
~BaseDigraph() override = default;
virtual void Node(const AnfNodePtr &node, int id) = 0;
virtual void Edge(const AnfNodePtr &start, const AnfNodePtr &end, int idx, int idx_start) = 0;
virtual void Edge(const AnfNodePtr &start, const AnfNodePtr &end, int idx, int id_start) = 0;
void Start() override;
void End() override;
@@ -79,7 +79,7 @@ class Digraph : public BaseDigraph {
~Digraph() override;
void Node(const AnfNodePtr &node, int id) override;
void Edge(const AnfNodePtr &start, const AnfNodePtr &end, int idx, int idx_start) override;
void Edge(const AnfNodePtr &start, const AnfNodePtr &end, int idx, int id_start) override;
};
class ModelDigraph : public BaseDigraph {
@@ -90,7 +90,7 @@ class ModelDigraph : public BaseDigraph {
std::string Shape(const AnfNodePtr &node) override;
void Node(const AnfNodePtr &node, int id) override;
void Edge(const AnfNodePtr &start, const AnfNodePtr &end, int idx, int idx_start) override;
void Edge(const AnfNodePtr &start, const AnfNodePtr &end, int idx, int id_start) override;
};
// API to draw

View File

@@ -58,7 +58,7 @@ class COMMON_EXPORT DuplexPipe : public std::enable_shared_from_this<mindspore::
void WriteWithStdout(const std::string &buf, bool flush);
std::string ReadWithStdin();
DuplexPipe &operator<<(const std::string &buf);
const DuplexPipe &operator<<(const std::string &buf) const;
DuplexPipe &operator>>(std::string &buf);
private:
@@ -116,7 +116,7 @@
private:
static void SigAlarmHandler(int sig);
static void SigPipeHandler(int sig);
static void SigChildHandler(int sig);
static void SigChildHandler(int /* sig */);
inline static std::weak_ptr<DuplexPipe> dp_;
inline static pid_t *child_pid_;