!21112 unified runtime pclint fixed

Merge pull request !21112 from limingqi107/bug_fix
i-robot 2021-07-30 16:05:04 +00:00 committed by Gitee
commit 5a58327a2e
16 changed files with 111 additions and 108 deletions
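
The fixes follow two mechanical PC-lint patterns visible throughout the hunks below: return values that the code deliberately ignores are discarded with an explicit (void) cast, and size_t/int conversions are routed through checked helpers such as SizeToInt, IntToSize, IntToUint, and UintToInt instead of being left implicit. A minimal sketch of both patterns, where SizeToIntSketch is a hypothetical stand-in for MindSpore's actual conversion utilities:

#include <climits>
#include <cstddef>
#include <stdexcept>
#include <vector>

// Hypothetical checked narrowing, standing in for MindSpore's SizeToInt:
// fail loudly instead of letting a size_t silently wrap into a negative int.
inline int SizeToIntSketch(std::size_t u) {
  if (u > static_cast<std::size_t>(INT_MAX)) {
    throw std::out_of_range("size_t value does not fit in int");
  }
  return static_cast<int>(u);
}

int main() {
  std::vector<int> values;

  // Pattern 1: a return value the code deliberately ignores is discarded
  // with an explicit (void) cast, which silences PC-lint's "ignoring return
  // value" diagnostic without changing behavior.
  (void)values.emplace_back(42);

  // Pattern 2: index conversions go through a checked helper instead of an
  // implicit size_t -> int conversion.
  for (std::size_t i = 0; i < values.size(); ++i) {
    (void)SizeToIntSketch(i);
  }
  return 0;
}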

View File

@@ -344,7 +344,7 @@ std::vector<KernelWithIndex> AnfRuntimeAlgorithm::GetAllOutputWithIndex(const An
}
// The output may be the tuple of node, so need visit all the outputs of node.
for (size_t i = 0; i < outputs_num; ++i) {
auto output_with_index = AnfAlgo::VisitKernelWithReturnType(node, i, false, return_types);
auto output_with_index = AnfAlgo::VisitKernelWithReturnType(node, SizeToInt(i), false, return_types);
MS_EXCEPTION_IF_NULL(output_with_index.first);
// The depend and makeTuple node need recurse.

View File

@@ -1222,7 +1222,7 @@ void KernelGraph::UpdateGraphOutputMap(const std::vector<AnfWithOutIndex> &old_o
<< old_output.second << " to " << new_output.first->fullname_with_scope() << " with index "
<< new_output.second;
graph_output_to_front_node_map_[new_output] = graph_output_to_front_node_map_[old_output];
graph_output_to_front_node_map_.erase(old_output);
(void)graph_output_to_front_node_map_.erase(old_output);
}
if (old_output.first == new_output.first) {

View File

@@ -1644,8 +1644,8 @@ void SessionBasic::UpdateOutputTensors(const VectorRef *outputs,
if (AnfAlgo::IsDynamicShape(node)) {
const auto &updated_shape = AnfAlgo::GetOutputInferShape(node, output_index);
ShapeVector int_shape;
std::transform(updated_shape.begin(), updated_shape.end(), std::back_inserter(int_shape), SizeToInt);
tensor->set_shape(int_shape);
(void)std::transform(updated_shape.begin(), updated_shape.end(), std::back_inserter(int_shape), SizeToInt);
(void)tensor->set_shape(int_shape);
}
}
if (tensor->NeedSyncDeviceToHostImmediately()) {

View File

@@ -78,7 +78,7 @@ bool GroupManager::CreateGroupByExecutor(const std::string &device_name, const s
(MsContext::GetInstance()->get_param<std::string>(MS_CTX_DEVICE_TARGET) == kGPUDevice)) {
return CommManager::GetInstance().CreateGroupSync(group_name, ranks);
} else {
auto executor = session::ExecutorManager::Instance().GetExecutor(device_name, device_id);
auto executor = session::ExecutorManager::Instance().GetExecutor(device_name, IntToUint(device_id));
MS_EXCEPTION_IF_NULL(executor);
return executor->CreateCommGroup(group_name, ranks);
}
@@ -91,7 +91,7 @@ bool GroupManager::DestroyGroupByExecutor(const std::string &device_name, const
(MsContext::GetInstance()->get_param<std::string>(MS_CTX_DEVICE_TARGET) == kGPUDevice)) {
return CommManager::GetInstance().DestroyGroup(group_name);
} else {
auto executor = session::ExecutorManager::Instance().GetExecutor(device_name, device_id);
auto executor = session::ExecutorManager::Instance().GetExecutor(device_name, IntToUint(device_id));
MS_EXCEPTION_IF_NULL(executor);
return executor->DestroyCommGroup(group_name);
}

View File

@@ -109,7 +109,7 @@ bool SetNodedefProto(const std::shared_ptr<KernelNodeInfo> &op_info,
bool AscendPsCache::InitDevice(uint32_t device_id, const void *context) {
MS_ERROR_IF_NULL(context);
auto ret = rtSetDevice(device_id);
auto ret = rtSetDevice(UintToInt(device_id));
if (ret != RT_ERROR_NONE) {
MS_LOG(ERROR) << "Call rtSetDevice, ret[" << ret << "]";
return false;
@@ -233,10 +233,10 @@ bool AscendPsCache::HashSwapOut(void *hash_table_addr, void *swap_out_value_addr
AddressPtrList kernel_outputs = {
std::make_shared<Address>(swap_out_value_addr, swap_out_size * embedding_size * sizeof(float))};
AddressPtrList kernel_workspaces;
kernel_inputs.emplace_back(
(void)kernel_inputs.emplace_back(
std::make_shared<Address>(hash_table_addr, cache_vocab_size * embedding_size * sizeof(float)));
kernel_inputs.emplace_back(std::make_shared<Address>(swap_out_index_addr, swap_out_size * sizeof(int)));
kernel_inputs.emplace_back(std::make_shared<Address>(offset_addr_, sizeof(int)));
(void)kernel_inputs.emplace_back(std::make_shared<Address>(swap_out_index_addr, swap_out_size * sizeof(int)));
(void)kernel_inputs.emplace_back(std::make_shared<Address>(offset_addr_, sizeof(int)));
auto ret = hash_swap_out_mod->Launch(kernel_inputs, kernel_workspaces, kernel_outputs, stream_);
if (!ret) {
MS_LOG(ERROR) << "Hash swap out launch failed.";
@@ -272,14 +272,14 @@ bool AscendPsCache::HashSwapIn(void *hash_table_addr, void *swap_in_value_addr,
AddressPtrList kernel_inputs;
AddressPtrList kernel_outputs;
AddressPtrList kernel_workspaces;
kernel_inputs.emplace_back(
(void)kernel_inputs.emplace_back(
std::make_shared<Address>(hash_table_addr, cache_vocab_size * embedding_size * sizeof(float)));
kernel_inputs.emplace_back(std::make_shared<Address>(swap_in_index_addr, swap_in_size * sizeof(int)));
kernel_inputs.emplace_back(
(void)kernel_inputs.emplace_back(std::make_shared<Address>(swap_in_index_addr, swap_in_size * sizeof(int)));
(void)kernel_inputs.emplace_back(
std::make_shared<Address>(swap_in_value_addr, swap_in_size * embedding_size * sizeof(float)));
kernel_inputs.emplace_back(std::make_shared<Address>(cache_vocab_size_addr_, sizeof(int)));
(void)kernel_inputs.emplace_back(std::make_shared<Address>(cache_vocab_size_addr_, sizeof(int)));
// The output of updateCache kernel is required but not useful, so any address can be assigned.
kernel_outputs.emplace_back(std::make_shared<Address>(offset_addr_, sizeof(int)));
(void)kernel_outputs.emplace_back(std::make_shared<Address>(offset_addr_, sizeof(int)));
auto ret = hash_swap_in_mod->Launch(kernel_inputs, kernel_workspaces, kernel_outputs, stream_);
if (!ret) {
MS_LOG(ERROR) << "Hash swap in launch failed.";

View File

@@ -24,7 +24,9 @@ namespace mindspore {
namespace device {
namespace cpu {
CPUDeviceAddress::~CPUDeviceAddress() {
CPUDeviceAddress::~CPUDeviceAddress() { ClearDeviceMemory(); }
void CPUDeviceAddress::ClearDeviceMemory() {
if (ptr_ == nullptr) {
return;
}

View File

@@ -41,7 +41,7 @@ class CPUDeviceAddress : public DeviceAddress {
const std::string &format = "DefaultFormat") const override;
bool DumpMemToFile(const std::string &filepath, const std::string &host_fmt, const ShapeVector &host_shape,
TypeId host_type, bool trans_flag) const override;
void ClearDeviceMemory() override {}
void ClearDeviceMemory() override;
DeviceAddressType DeviceType() const override { return DeviceAddressType::kCPU; }
};
} // namespace cpu
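
The CPU device address change above is the one non-mechanical fix in this PR: the previously empty ClearDeviceMemory() override becomes the real release path, and the destructor delegates to it, so memory can be freed early or at destruction through the same idempotent routine. A minimal sketch of that delegation pattern, with DeviceAddressSketch and std::malloc/std::free as illustrative stand-ins for CPUDeviceAddress and the runtime's allocator:

#include <cstdlib>

// Illustrative sketch only; names and the allocator are assumptions.
class DeviceAddressSketch {
 public:
  DeviceAddressSketch() : ptr_(std::malloc(64)) {}
  // The destructor now delegates to the same cleanup path that callers
  // can invoke early, instead of freeing the pointer inline.
  ~DeviceAddressSketch() { ClearDeviceMemory(); }

  // Idempotent: the null check makes an explicit call followed by
  // destruction safe.
  void ClearDeviceMemory() {
    if (ptr_ == nullptr) {
      return;
    }
    std::free(ptr_);
    ptr_ = nullptr;
  }

 private:
  void *ptr_;
};

int main() {
  DeviceAddressSketch addr;
  addr.ClearDeviceMemory();  // early release...
  return 0;                  // ...and the destructor is still safe.
}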

View File

@@ -34,14 +34,14 @@ void CopyActor::Init() {
MS_LOG(EXCEPTION) << "The output index is out of range: " << GetAID().Name();
}
auto data = std::make_unique<OpData<DeviceTensor>>(data_arrow->to_op_id_, nullptr, data_arrow->to_input_index_);
output_data_.emplace_back(std::move(data));
(void)output_data_.emplace_back(std::move(data));
}
}
void CopyActor::RunOpData(OpData<DeviceTensor> *const input_data, OpContext<DeviceTensor> *const context) {
MS_EXCEPTION_IF_NULL(context);
auto &sequential_num = context->sequential_num_;
input_op_datas_[sequential_num].emplace_back(input_data);
(void)input_op_datas_[sequential_num].emplace_back(input_data);
// When all the inputs are collected, then allocate memory and callback copy.
if (CheckCopyCondition(context)) {
FetchDeviceTensor(context);
@@ -52,7 +52,7 @@ void CopyActor::RunOpData(OpData<DeviceTensor> *const input_data, OpContext<Devi
void CopyActor::RunOpControl(AID *const input_control, OpContext<DeviceTensor> *const context) {
MS_EXCEPTION_IF_NULL(context);
auto &sequential_num = context->sequential_num_;
input_op_controls_[sequential_num].emplace_back(input_control);
(void)input_op_controls_[sequential_num].emplace_back(input_control);
// When all the inputs are collected, then allocate memory and callback copy.
if (CheckCopyCondition(context)) {
FetchDeviceTensor(context);

View File

@@ -31,7 +31,7 @@ void DataSourceActor::Init() {
for (auto &data_arrow : output_data_arrows_) {
MS_EXCEPTION_IF_NULL(data_arrow);
auto data = std::make_unique<OpData<DeviceTensor>>(data_arrow->to_op_id_, nullptr, data_arrow->to_input_index_);
output_data_.emplace_back(std::move(data));
(void)output_data_.emplace_back(std::move(data));
}
}
@@ -102,13 +102,13 @@ void DeviceQueueDataSourceActor::Init() {
for (auto &data_arrow : output_data_arrows_) {
MS_EXCEPTION_IF_NULL(data_arrow);
auto data = std::make_unique<OpData<DeviceTensor>>(data_arrow->to_op_id_, nullptr, data_arrow->to_input_index_);
output_data_.emplace_back(std::move(data));
(void)output_data_.emplace_back(std::move(data));
}
// Init kernel launch info.
MS_EXCEPTION_IF_NULL(kernel_info_);
for (size_t i = 0; i < kernel_info_->output_address_list().size(); ++i) {
launch_info_.outputs_.emplace_back(std::make_shared<Address>());
(void)launch_info_.outputs_.emplace_back(std::make_shared<Address>());
}
}
@@ -118,7 +118,7 @@ void DeviceQueueDataSourceActor::FillDataBuffer() {
std::vector<DeviceTensor *> device_tensors;
for (auto &device_tensor : kernel_info_->output_address_list()) {
MS_EXCEPTION_IF_NULL(device_tensor);
device_tensors.emplace_back(device_tensor.get());
(void)device_tensors.emplace_back(device_tensor.get());
}
buffers_.push(device_tensors);
@@ -207,7 +207,7 @@ void HostQueueDataSourceActor::FillDataBuffer() {
for (auto &data_node : data_nodes_) {
auto device_address = AnfAlgo::GetMutableOutputAddr(data_node, 0, false);
MS_EXCEPTION_IF_NULL(device_address);
device_tensors.emplace_back(device_address.get());
(void)device_tensors.emplace_back(device_address.get());
}
buffers_.push(device_tensors);

View File

@@ -37,26 +37,26 @@ void KernelActor::Init() {
copy_input_device_tensors_.resize(real_input_num_);
input_device_tensors_.resize(real_input_num_);
for (auto &input_address : input_device_tensors_) {
memory_free_list_.emplace_back(input_address);
launch_info_.inputs_.emplace_back(std::make_shared<Address>());
(void)memory_free_list_.emplace_back(input_address);
(void)launch_info_.inputs_.emplace_back(std::make_shared<Address>());
}
MS_EXCEPTION_IF_NULL(kernel_info_);
for (auto &output_address : kernel_info_->output_address_list()) {
MS_EXCEPTION_IF_NULL(output_address);
output_device_tensors_.emplace_back(output_address.get());
memory_alloc_list_.emplace_back(output_address.get());
memory_free_list_.emplace_back(output_address.get());
launch_info_.outputs_.emplace_back(std::make_shared<Address>());
(void)output_device_tensors_.emplace_back(output_address.get());
(void)memory_alloc_list_.emplace_back(output_address.get());
(void)memory_free_list_.emplace_back(output_address.get());
(void)launch_info_.outputs_.emplace_back(std::make_shared<Address>());
}
for (auto &workspace_address : kernel_info_->workspace_address_list()) {
MS_EXCEPTION_IF_NULL(workspace_address);
workspace_device_tensors_.emplace_back(workspace_address.get());
memory_alloc_list_.emplace_back(workspace_address.get());
memory_free_list_.emplace_back(workspace_address.get());
launch_info_.workspaces_.emplace_back(std::make_shared<Address>());
(void)workspace_device_tensors_.emplace_back(workspace_address.get());
(void)memory_alloc_list_.emplace_back(workspace_address.get());
(void)memory_free_list_.emplace_back(workspace_address.get());
(void)launch_info_.workspaces_.emplace_back(std::make_shared<Address>());
}
for (auto &external_reference_tensor : external_reference_tensors_) {
memory_free_list_.emplace_back(external_reference_tensor);
(void)memory_free_list_.emplace_back(external_reference_tensor);
}
// Init the output data.
@@ -69,15 +69,15 @@ void KernelActor::Init() {
auto device_address = output_device_tensors_[data_arrow->from_output_index_];
auto data =
std::make_unique<OpData<DeviceTensor>>(data_arrow->to_op_id_, device_address, data_arrow->to_input_index_);
output_data_.emplace_back(data.get());
output_data_by_output_index_[data_arrow->from_output_index_].emplace_back(std::move(data));
(void)output_data_.emplace_back(data.get());
(void)output_data_by_output_index_[data_arrow->from_output_index_].emplace_back(std::move(data));
}
}
void KernelActor::RunOpData(OpData<DeviceTensor> *const input_data, OpContext<DeviceTensor> *const context) {
MS_EXCEPTION_IF_NULL(context);
auto &sequential_num = context->sequential_num_;
input_op_datas_[sequential_num].emplace_back(input_data);
(void)input_op_datas_[sequential_num].emplace_back(input_data);
if (input_data->data_ == nullptr) {
std::string error_info =
"Input data of actor:" + GetAID().Name() + " num:" + std::to_string(input_data->index_) + " is empty";
@@ -103,7 +103,7 @@ void KernelActor::RunOpData(OpData<DeviceTensor> *const input_data, OpContext<De
void KernelActor::RunOpControl(AID *const input_control, OpContext<DeviceTensor> *const context) {
MS_EXCEPTION_IF_NULL(context);
auto &sequential_num = context->sequential_num_;
input_op_controls_[sequential_num].emplace_back(input_control);
(void)input_op_controls_[sequential_num].emplace_back(input_control);
// When all the inputs are collected, then allocate memory and callback launch.
if (CheckLaunchCondition(context)) {
// Infer kernel shape and update abstract info for dynamic shape kernel.

View File

@@ -42,8 +42,8 @@ void FetchContinuousMemoryInfo(const CNodePtr &node, std::vector<DeviceTensorPtr
const auto &device_tensor = AnfAlgo::GetPrevNodeMutableOutputAddr(node, i, false);
MS_EXCEPTION_IF_NULL(device_tensor);
*total_size += intput_sizes[i];
size_list->emplace_back(intput_sizes[i]);
addr_list->emplace_back(device_tensor);
(void)size_list->emplace_back(intput_sizes[i]);
(void)addr_list->emplace_back(device_tensor);
}
} else {
const auto &output_sizes = kernel_mod->GetOutputSizeList();
@@ -51,8 +51,8 @@ void FetchContinuousMemoryInfo(const CNodePtr &node, std::vector<DeviceTensorPtr
const auto &device_tensor = AnfAlgo::GetMutableOutputAddr(node, i, false);
MS_EXCEPTION_IF_NULL(device_tensor);
*total_size += output_sizes[i];
size_list->emplace_back(output_sizes[i]);
addr_list->emplace_back(device_tensor);
(void)size_list->emplace_back(output_sizes[i]);
(void)addr_list->emplace_back(device_tensor);
}
}
}
@@ -65,19 +65,19 @@ void LoopCountActor::Init() {
// Inputs need continuous memory.
if (iter.second.first == true) {
FetchContinuousMemoryInfo(iter.first.first, &addr_list, &size_list, &total_size, true);
continuous_memory_alloc_list_list_.emplace_back(addr_list);
size_list_list_.emplace_back(size_list);
total_size_list_.emplace_back(total_size);
device_contexts_.emplace_back(iter.first.second);
(void)continuous_memory_alloc_list_list_.emplace_back(addr_list);
(void)size_list_list_.emplace_back(size_list);
(void)total_size_list_.emplace_back(total_size);
(void)device_contexts_.emplace_back(iter.first.second);
}
// Outputs need continuous memory.
if (iter.second.second == true) {
FetchContinuousMemoryInfo(iter.first.first, &addr_list, &size_list, &total_size, false);
continuous_memory_alloc_list_list_.emplace_back(addr_list);
size_list_list_.emplace_back(size_list);
total_size_list_.emplace_back(total_size);
device_contexts_.emplace_back(iter.first.second);
(void)continuous_memory_alloc_list_list_.emplace_back(addr_list);
(void)size_list_list_.emplace_back(size_list);
(void)total_size_list_.emplace_back(total_size);
(void)device_contexts_.emplace_back(iter.first.second);
}
}
}
@@ -85,7 +85,7 @@ void LoopCountActor::Init() {
void LoopCountActor::RunOpControl(AID *const input_control, OpContext<DeviceTensor> *const context) {
MS_EXCEPTION_IF_NULL(context);
auto sequential_num = context->sequential_num_;
input_op_controls_[sequential_num].emplace_back(input_control);
(void)input_op_controls_[sequential_num].emplace_back(input_control);
if (CheckLoopCountIncreaseCondition(context)) {
IncreaseLoopCount(context);
}

View File

@@ -440,7 +440,7 @@ void SwitchActor::SendOutput(OpContext<DeviceTensor> *context) {
auto &data = output_data[i];
MS_EXCEPTION_IF_NULL(data_arrow);
MS_EXCEPTION_IF_NULL(data);
data->data_ = input_device_tensors_[data_arrow->from_output_index_];
data->data_ = input_device_tensors_[IntToSize(data_arrow->from_output_index_)];
Async(data_arrow->to_op_id_, &OpActor::RunOpData, data.get(), context);
}

View File

@@ -227,7 +227,7 @@ void UpdateDeviceAddressForInplaceNode(const KernelGraphPtr &graph) {
auto inplace_group_attr = primitive->GetAttr("inplace_group");
MS_EXCEPTION_IF_NULL(inplace_group_attr);
auto group_id = GetValue<uint32_t>(inplace_group_attr);
inplace_groups[group_id].emplace_back(kernel);
(void)inplace_groups[group_id].emplace_back(kernel);
}
const size_t kMinInplaceGroupSize = 2;

View File

@@ -556,15 +556,15 @@ void GraphScheduler::Schedule(const ActorSet *actor_set) {
// Collect actors.
for (auto &data_source_actor : actor_set->data_source_actors_) {
MS_EXCEPTION_IF_NULL(data_source_actor);
actors.emplace_back(static_cast<ActorReference>(data_source_actor));
(void)actors.emplace_back(static_cast<ActorReference>(data_source_actor));
}
for (auto &kernel_actor : actor_set->kernel_actors_) {
MS_EXCEPTION_IF_NULL(kernel_actor);
actors.emplace_back(static_cast<ActorReference>(kernel_actor));
(void)actors.emplace_back(static_cast<ActorReference>(kernel_actor));
}
for (auto &switch_actor : actor_set->switch_actors_) {
MS_EXCEPTION_IF_NULL(switch_actor);
actors.emplace_back(static_cast<ActorReference>(switch_actor));
(void)actors.emplace_back(static_cast<ActorReference>(switch_actor));
}
for (auto &gather_actor : actor_set->gather_actors_) {
MS_EXCEPTION_IF_NULL(gather_actor);
@@ -572,13 +572,13 @@ void GraphScheduler::Schedule(const ActorSet *actor_set) {
}
for (auto &copy_actor : actor_set->copy_actors_) {
MS_EXCEPTION_IF_NULL(copy_actor);
actors.emplace_back(static_cast<ActorReference>(copy_actor));
(void)actors.emplace_back(static_cast<ActorReference>(copy_actor));
}
if (actor_set->loop_count_actor_ != nullptr) {
actors.emplace_back(static_cast<ActorReference>(actor_set->loop_count_actor_));
(void)actors.emplace_back(static_cast<ActorReference>(actor_set->loop_count_actor_));
}
if (actor_set->output_actor_ != nullptr) {
actors.emplace_back(static_cast<ActorReference>(actor_set->output_actor_));
(void)actors.emplace_back(static_cast<ActorReference>(actor_set->output_actor_));
}
// Schedule actors.
@@ -881,7 +881,7 @@ void GraphScheduler::Link(ActorSet *actor_set, const GraphCompilerInfo &graph_co
auto execution_order = graph->execution_order();
for (auto &kernel : execution_order) {
if (AnfAlgo::IsCommunicationOp(kernel)) {
communication_nodes.emplace_back(kernel);
(void)communication_nodes.emplace_back(kernel);
}
if (IsSkippedKernelActor(kernel) || (!IsKernelActor(kernel, graph_compiler_info.strategy_))) {
continue;
@@ -896,7 +896,7 @@ void GraphScheduler::Link(ActorSet *actor_set, const GraphCompilerInfo &graph_co
LinkControlArrowByAutoMonad(kernel_actor, input_node, graph);
}
if (HasAbstractMonad(input_node)) {
auto_monad_actors.emplace_back(kernel_actor);
(void)auto_monad_actors.emplace_back(kernel_actor);
continue; // No data arrow for monad input.
}
@@ -976,7 +976,7 @@ std::vector<DataSourceActorPtr> GraphScheduler::BuildDataSourceActor(const Graph
host_queue_ds_actor = std::make_shared<HostQueueDataSourceActor>(actor_name, 1, memory_manager_aid_, nullptr,
nullptr, host_queue);
InsertActor(host_queue_ds_actor.get());
data_source_actors.emplace_back(host_queue_ds_actor);
(void)data_source_actors.emplace_back(host_queue_ds_actor);
}
const auto &front_node = FetchFrontNodeByBackendNode(input_node, graph);
@@ -986,8 +986,8 @@ std::vector<DataSourceActorPtr> GraphScheduler::BuildDataSourceActor(const Graph
host_queue_ds_actor->data_node_position_map_.emplace(input_node, front_node_position_temp_map[front_node]);
continue;
}
host_queue_ds_actor->data_nodes_.emplace_back(input_node);
host_queue_ds_actor->device_contexts_.emplace_back(device_context);
(void)host_queue_ds_actor->data_nodes_.emplace_back(input_node);
(void)host_queue_ds_actor->device_contexts_.emplace_back(device_context);
host_queue_ds_actor->data_node_position_map_.emplace(input_node, data_node_position);
front_node_position_temp_map.emplace(front_node, data_node_position);
data_node_position++;
@@ -1007,7 +1007,7 @@ std::vector<DataSourceActorPtr> GraphScheduler::BuildDataSourceActor(const Graph
actor_name, 1, device_context, memory_manager_aid_, debug_aid_, recorder_aid_);
MS_EXCEPTION_IF_NULL(device_queue_ds_actor);
InsertActor(device_queue_ds_actor.get());
data_source_actors.emplace_back(device_queue_ds_actor);
(void)data_source_actors.emplace_back(device_queue_ds_actor);
device_queue_ds_actor->data_kernel_ = *iter;
device_queue_ds_actor->kernel_info_ = static_cast<device::KernelInfo *>((*iter)->kernel_info());
}
@@ -1074,7 +1074,7 @@ std::vector<KernelActorPtr> GraphScheduler::BuildKernelActor(const GraphCompiler
memory_manager_aid_, debug_aid_, recorder_aid_, strategy);
MS_EXCEPTION_IF_NULL(kernel_actor);
InsertActor(kernel_actor.get());
kernel_actors.emplace_back(kernel_actor);
(void)kernel_actors.emplace_back(kernel_actor);
auto front_node = graph->GetFrontAnfByBackendAnf(kernel);
if (front_node != nullptr) {
front_node_to_actor_[front_node] = kernel_actor;
@@ -1169,7 +1169,7 @@ std::vector<KernelActorPtr> GraphScheduler::BuildNoInputKernelActor(const ActorS
}
}
no_input_kernel_actors.emplace_back(kernel_actor);
(void)no_input_kernel_actors.emplace_back(kernel_actor);
}
}
return no_input_kernel_actors;
@@ -1387,7 +1387,8 @@ void GraphScheduler::LinkDataArrow(KernelActor *to_actor, const GraphCompilerInf
to_kernel_with_input_idx);
} else if (IsPersistentDeviceTensor(from_kernel)) {
const auto devcie_tensor_store_key = FetchFrontNodeByBackendNode(from_kernel, graph);
to_actor->device_tensor_store_keys_.emplace_back(to_kernel_with_input_idx.second, devcie_tensor_store_key.get());
(void)to_actor->device_tensor_store_keys_.emplace_back(to_kernel_with_input_idx.second,
devcie_tensor_store_key.get());
} else {
// May exist the from kernel that no need link in the pynative mode.
MS_LOG(DEBUG) << "Invalid from kernel: " << from_kernel->fullname_with_scope();
@@ -1414,7 +1415,7 @@ void GraphScheduler::LinkDataArrowForInternalParameter(const AnfNodePtr &interna
return;
}
if (IsPersistentDeviceTensor(front_output_node)) {
to_actor->device_tensor_store_keys_.emplace_back(to_kernel_with_input_idx.second, front_output_node.get());
(void)to_actor->device_tensor_store_keys_.emplace_back(to_kernel_with_input_idx.second, front_output_node.get());
return;
}
@@ -1468,9 +1469,9 @@ void GraphScheduler::LinkDataArrowForDeviceDSActor(DeviceQueueDataSourceActor *c
} else {
auto to_aid = to_actor->GetAID();
auto op_arrow = std::make_shared<DataArrow>(from_output_index, to_aid, to_input_index);
from_actor->output_data_arrows_.emplace_back(op_arrow);
(void)from_actor->output_data_arrows_.emplace_back(op_arrow);
to_actor->input_datas_num_++;
to_actor->input_data_arrow_aids_.emplace_back(from_actor->GetAID());
(void)to_actor->input_data_arrow_aids_.emplace_back(from_actor->GetAID());
// Update the reference count of device tensor.
UpdateRefCount(from_kernel, from_output_index);
@@ -1497,9 +1498,9 @@ void GraphScheduler::LinkDataArrowForHostDSActor(HostQueueDataSourceActor *const
} else {
auto to_aid = to_actor->GetAID();
auto op_arrow = std::make_shared<DataArrow>(position, to_aid, to_input_index);
from_actor->output_data_arrows_.emplace_back(op_arrow);
(void)from_actor->output_data_arrows_.emplace_back(op_arrow);
to_actor->input_datas_num_++;
to_actor->input_data_arrow_aids_.emplace_back(from_actor->GetAID());
(void)to_actor->input_data_arrow_aids_.emplace_back(from_actor->GetAID());
// Update the reference count of device tensor.
UpdateRefCount(from_actor->data_nodes_[position], from_output_index);
@@ -1537,9 +1538,9 @@ void GraphScheduler::LinkDataArrowForKernelActor(KernelActor *from_actor, Kernel
} else {
auto to_aid = to_actor->GetAID();
auto op_arrow = std::make_shared<DataArrow>(from_output_index, to_aid, to_input_index);
from_actor->output_data_arrows_.emplace_back(op_arrow);
(void)from_actor->output_data_arrows_.emplace_back(op_arrow);
to_actor->input_datas_num_++;
to_actor->input_data_arrow_aids_.emplace_back(from_actor->GetAID());
(void)to_actor->input_data_arrow_aids_.emplace_back(from_actor->GetAID());
// Update the reference count of device tensor.
UpdateRefCount(from_kernel, from_output_index);
@@ -1565,7 +1566,7 @@ void GraphScheduler::LinkDataArrowForCopyActor(OpActor<DeviceTensor> *const from
if (copy_actor == nullptr) {
// Create the copy actor.
auto copy_actor_shared_ptr = std::make_shared<CopyActor>(name, memory_manager_aid_);
copy_actors_.emplace_back(copy_actor_shared_ptr);
(void)copy_actors_.emplace_back(copy_actor_shared_ptr);
copy_actor = copy_actor_shared_ptr.get();
MS_EXCEPTION_IF_NULL(copy_actor);
InsertActor(copy_actor);
@@ -1578,19 +1579,19 @@ void GraphScheduler::LinkDataArrowForCopyActor(OpActor<DeviceTensor> *const from
auto real_from_actor = dynamic_cast<DeviceQueueDataSourceActor *>(from_actor);
MS_EXCEPTION_IF_NULL(real_from_actor);
from_devcie_context = real_from_actor->device_context_;
real_from_actor->output_data_arrows_.emplace_back(op_arrow_to_copy);
(void)real_from_actor->output_data_arrows_.emplace_back(op_arrow_to_copy);
} else if (IsKernelActor(from_kernel)) {
auto real_from_actor = dynamic_cast<KernelActor *>(from_actor);
MS_EXCEPTION_IF_NULL(real_from_actor);
from_devcie_context = real_from_actor->device_context_;
real_from_actor->output_data_arrows_.emplace_back(op_arrow_to_copy);
(void)real_from_actor->output_data_arrows_.emplace_back(op_arrow_to_copy);
} else if (IsHostQueueDSActor(from_kernel)) {
auto real_from_actor = dynamic_cast<HostQueueDataSourceActor *>(from_actor);
MS_EXCEPTION_IF_NULL(real_from_actor);
auto position = real_from_actor->FetchDataNodePosition(from_kernel);
from_devcie_context = real_from_actor->device_contexts_[position];
op_arrow_to_copy->from_output_index_ = position;
real_from_actor->output_data_arrows_.emplace_back(op_arrow_to_copy);
op_arrow_to_copy->from_output_index_ = SizeToInt(position);
(void)real_from_actor->output_data_arrows_.emplace_back(op_arrow_to_copy);
from_device_tensor =
AnfAlgo::GetMutableOutputAddr(real_from_actor->data_nodes_[position], from_output_index, false);
}
@@ -1616,7 +1617,7 @@ void GraphScheduler::LinkDataArrowForCopyActor(OpActor<DeviceTensor> *const from
// If the copy actor already exists, only need link between copy actor and to actor.
auto op_arrow_from_copy = std::make_shared<DataArrow>(0, to_actor->GetAID(), to_input_index);
copy_actor->output_data_arrows_.emplace_back(op_arrow_from_copy);
(void)copy_actor->output_data_arrows_.emplace_back(op_arrow_from_copy);
to_actor->input_datas_num_++;
UpdateRefCount(copy_actor->output_.get());
}
@@ -1693,7 +1694,7 @@ void GraphScheduler::LinkControlArrowByAutoMonad(KernelActor *to_actor, const An
}
MS_LOG(INFO) << "Link control arrow by auto monad, from actor: " << from_actor->GetAID().Name()
<< ", to actor: " << to_actor->GetAID().Name();
from_actor->output_control_arrows_.emplace_back(to_actor->GetAID());
(void)from_actor->output_control_arrows_.emplace_back(to_actor->GetAID());
to_actor->input_controls_num_++;
}
}
@@ -1712,7 +1713,7 @@ void GraphScheduler::LinkControlArrowBySkippedNode(KernelActor *to_actor, const
MS_EXCEPTION_IF_NULL(from_actor);
MS_LOG(INFO) << "Link control arrow by skipped node: " << skipped_node->fullname_with_scope()
<< ", from actor: " << from_actor->GetAID().Name() << ", to actor: " << to_actor->GetAID().Name();
from_actor->output_control_arrows_.emplace_back(to_aid);
(void)from_actor->output_control_arrows_.emplace_back(to_aid);
to_actor->input_controls_num_++;
}
}
@@ -1732,17 +1733,17 @@ void GraphScheduler::LinkControlArrowBySendRecvNodes(const KernelGraphPtr &graph
for (auto &input_aid : to_allreduce_actor->input_data_arrow_aids_) {
auto input_actor = dynamic_cast<KernelActor *>(FetchActor(input_aid.Name()));
if (input_actor != nullptr) {
input_actor->output_control_arrows_.emplace_back(from_send_actor->GetAID());
(void)input_actor->output_control_arrows_.emplace_back(from_send_actor->GetAID());
from_send_actor->input_controls_num_++;
}
}
// from_send_actor --> from_recv_actor
from_send_actor->output_control_arrows_.emplace_back(from_recv_actor->GetAID());
(void)from_send_actor->output_control_arrows_.emplace_back(from_recv_actor->GetAID());
from_recv_actor->input_controls_num_++;
// from_recv_actor --> to_allreduce_actor
from_recv_actor->output_control_arrows_.emplace_back(to_allreduce_actor->GetAID());
(void)from_recv_actor->output_control_arrows_.emplace_back(to_allreduce_actor->GetAID());
to_allreduce_actor->input_controls_num_++;
}
@@ -1756,18 +1757,18 @@ void GraphScheduler::LinkControlArrowBySendRecvNodes(const KernelGraphPtr &graph
auto to_recv_actor = dynamic_cast<KernelActor *>(FetchActor(to_recv_node->fullname_with_scope()));
// from_allreduce_actor --> to_send_actor
from_allreduce_actor->output_control_arrows_.emplace_back(to_send_actor->GetAID());
(void)from_allreduce_actor->output_control_arrows_.emplace_back(to_send_actor->GetAID());
to_send_actor->input_controls_num_++;
// to_send_actor --> to_recv_actor
to_send_actor->output_control_arrows_.emplace_back(to_recv_actor->GetAID());
(void)to_send_actor->output_control_arrows_.emplace_back(to_recv_actor->GetAID());
to_recv_actor->input_controls_num_++;
// to_recv_actor --> outputs of from_allreduce_actor
for (auto &output_data_arrow : from_allreduce_actor->output_data_arrows_) {
auto output_actor = dynamic_cast<KernelActor *>(FetchActor(output_data_arrow->to_op_id_.Name()));
if (output_actor != nullptr) {
to_recv_actor->output_control_arrows_.emplace_back(output_actor->GetAID());
(void)to_recv_actor->output_control_arrows_.emplace_back(output_actor->GetAID());
output_actor->input_controls_num_++;
}
}
@@ -1778,7 +1779,7 @@ void GraphScheduler::LinkControlArrowBySendRecvNodes(const KernelGraphPtr &graph
auto device_tensor = AnfAlgo::GetPrevNodeMutableOutputAddr(from_allreduce_node, i, false);
MS_EXCEPTION_IF_NULL(device_tensor);
UpdateRefCount(device_tensor.get());
to_recv_actor->external_reference_tensors_.emplace_back(device_tensor.get());
(void)to_recv_actor->external_reference_tensors_.emplace_back(device_tensor.get());
}
}
}
@@ -1796,7 +1797,7 @@ void GraphScheduler::LinkControlArrowByCommunicationNode(const std::vector<CNode
auto to_actor = dynamic_cast<KernelActor *>(FetchActor(communication_nodes[i]->fullname_with_scope()));
MS_EXCEPTION_IF_NULL(from_actor);
MS_EXCEPTION_IF_NULL(to_actor);
from_actor->output_control_arrows_.emplace_back(to_actor->GetAID());
(void)from_actor->output_control_arrows_.emplace_back(to_actor->GetAID());
to_actor->input_controls_num_++;
}
@@ -1808,7 +1809,7 @@ void GraphScheduler::LinkControlArrowByCommunicationNode(const std::vector<CNode
auto from_actor = dynamic_cast<KernelActor *>(FetchActor(execution_order[i - 1]->fullname_with_scope()));
auto to_actor = dynamic_cast<KernelActor *>(FetchActor(execution_order[i]->fullname_with_scope()));
if ((from_actor != nullptr) && (to_actor != nullptr)) {
from_actor->output_control_arrows_.emplace_back(to_actor->GetAID());
(void)from_actor->output_control_arrows_.emplace_back(to_actor->GetAID());
to_actor->input_controls_num_++;
}
}
@@ -1831,35 +1832,35 @@ void GraphScheduler::LinkControlArrowForLoopCountActor(LoopCountActor *loop_coun
parser->IsKernelInRootFuncGraph(kernel_actor->kernel_)) {
MS_EXCEPTION_IF_NULL(kernel_actor->kernel_);
MS_LOG(INFO) << kernel_actor->kernel_->fullname_with_scope() << " is not real used by other nodes.";
no_output_actors.emplace_back(kernel_actor.get());
(void)no_output_actors.emplace_back(kernel_actor.get());
}
}
for (auto &data_actor : actor_set->data_source_actors_) {
if ((data_actor->output_data_arrows_.size() == 0) && (data_actor->output_control_arrows_.size() == 0)) {
no_output_actors.emplace_back(data_actor.get());
(void)no_output_actors.emplace_back(data_actor.get());
}
}
for (auto &copy_actor : copy_actors_) {
if ((copy_actor->output_data_arrows_.size() == 0) && (copy_actor->output_control_arrows_.size() == 0)) {
no_output_actors.emplace_back(copy_actor.get());
(void)no_output_actors.emplace_back(copy_actor.get());
}
}
// No output actor --> loop count actor.
for (auto &no_output_actor : no_output_actors) {
no_output_actor->output_control_arrows_.emplace_back(loop_count_actor->GetAID());
(void)no_output_actor->output_control_arrows_.emplace_back(loop_count_actor->GetAID());
loop_count_actor->input_controls_num_++;
}
// Loop count actor --> data source actor.
for (auto &data_source_actor : actor_set->data_source_actors_) {
MS_EXCEPTION_IF_NULL(data_source_actor);
loop_count_actor->data_source_aids_.emplace_back(data_source_actor->GetAID());
(void)loop_count_actor->data_source_aids_.emplace_back(data_source_actor->GetAID());
}
// Loop count actor --> no input kernel actor.
for (auto &no_input_kernel_actor : actor_set->no_input_kernel_actors_) {
MS_EXCEPTION_IF_NULL(no_input_kernel_actor);
loop_count_actor->no_input_kernel_aids_.emplace_back(no_input_kernel_actor->GetAID());
(void)loop_count_actor->no_input_kernel_aids_.emplace_back(no_input_kernel_actor->GetAID());
no_input_kernel_actor->input_controls_num_++;
}
@@ -1920,7 +1921,7 @@ void GraphScheduler::LinkOutputResultArrowForOutputActor(OutputActor *to_actor,
dynamic_cast<KernelActor *>(FetchActor(output_with_index.first->fullname_with_scope()));
MS_EXCEPTION_IF_NULL(from_actor);
auto op_arrow = std::make_shared<DataArrow>(output_with_index.second, to_actor->GetAID(), output_position);
from_actor->output_result_arrows_.emplace_back(op_arrow);
(void)from_actor->output_result_arrows_.emplace_back(op_arrow);
continue;
}
@@ -1952,7 +1953,7 @@ void GraphScheduler::LinkOutputResultArrowForOutputActor(OutputActor *to_actor,
}
MS_EXCEPTION_IF_NULL(from_actor);
auto op_arrow = std::make_shared<DataArrow>(from_actor_output_index, to_actor->GetAID(), output_position);
from_actor->output_result_arrows_.emplace_back(op_arrow);
(void)from_actor->output_result_arrows_.emplace_back(op_arrow);
}
}
}
@@ -2051,7 +2052,7 @@ void GraphScheduler::LinkDeviceTensorStoreForAutoMonadActor(const std::vector<Ke
}
auto copy_actor = std::make_shared<CopyActor>(name, memory_manager_aid_);
MS_EXCEPTION_IF_NULL(copy_actor);
copy_actors_.emplace_back(copy_actor);
(void)copy_actors_.emplace_back(copy_actor);
InsertActor(copy_actor.get());
// Set the member of the copy actor.
@@ -2072,13 +2073,13 @@ void GraphScheduler::LinkDeviceTensorStoreForAutoMonadActor(const std::vector<Ke
<< "has control arrows number:" << kernel_actor->output_control_arrows_.size();
// Link from copy actor to kernel actor users.
for (auto &output_contorl : kernel_actor->output_control_arrows_) {
copy_actor->output_control_arrows_.emplace_back(output_contorl);
(void)copy_actor->output_control_arrows_.emplace_back(output_contorl);
}
// Move the control arrows from kernel actor to kernel actor users.
kernel_actor->output_control_arrows_.clear();
// Link from kernel actor to copy actor.
kernel_actor->output_control_arrows_.emplace_back(copy_actor->GetAID());
(void)kernel_actor->output_control_arrows_.emplace_back(copy_actor->GetAID());
copy_actor->input_controls_num_++;
}
}
@@ -2617,7 +2618,7 @@ bool GraphScheduler::CheckActorValid(const ActorSet *actor_set, GraphExecutionSt
auto input_num = AnfAlgo::GetInputTensorNum(kernel_actor->kernel_);
auto input_data_num = kernel_actor->input_datas_num_;
auto device_tensor_store_num = kernel_actor->device_tensor_store_keys_.size();
if (input_data_num + device_tensor_store_num != input_num) {
if (input_data_num + IntToSize(device_tensor_store_num) != input_num) {
MS_LOG(ERROR) << "The input building of " << AnfAlgo::GetNodeDebugString(kernel_actor->kernel_)
<< " is wrong, input data num: " << input_data_num
<< ", device tensor store num: " << device_tensor_store_num << ", total input num: " << input_num;

View File

@@ -54,7 +54,7 @@ size_t GetSystemMemorySize(const std::string &key) {
}
}
fclose(file);
(void)fclose(file);
return mem_size * kKBToByte;
#endif
}

View File

@@ -809,7 +809,7 @@ void MindRTBackend::RunGraph(const ActorInfo &actor_info, const VectorRef &args,
const auto &front_node = kernel_graph->GetFrontAnfByBackendAnf(input_node);
PushTensor(args, origin_parameters, front_node, &input_tensor);
}
input_tensors.emplace_back(input_tensor);
(void)input_tensors.emplace_back(input_tensor);
}
// Input tensors of the control node.