forked from OSSInnovation/mindspore
clear warning
This commit is contained in:
parent: f65756162e
commit: 71b6370b4d
@@ -375,7 +375,6 @@ bool AscendDeviceAddress::LoadMemToHost(bool trans_flag, const std::string &tens
    tensor_data->SetTensor(out_tensor);
    tensor_data->SetSlot(slot);
    ret = tensor_loader->LoadNewTensor(tensor_data);

  } else {
    mindspore::tensor::TensorPtr out_tensor = std::make_shared<tensor::Tensor>(type_id_, host_shape);
    size_t host_size = out_tensor->data().nbytes();
@@ -395,7 +394,6 @@ bool AscendDeviceAddress::LoadMemToHost(bool trans_flag, const std::string &tens
  return ret;
}
#endif

}  // namespace ascend
}  // namespace device
}  // namespace mindspore
@@ -13,7 +13,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define PATH_MAX 0x3ffff
#include "device/ascend/ascend_kernel_runtime.h"
#include <string>
#include <vector>
@@ -21,7 +21,6 @@
#include <utility>
#include <exception>
#include <algorithm>

#include "device/ascend/ascend_device_address.h"
#include "device/cpu/mpi/mpi_adapter.h"
#include "utils/context/ms_context.h"
@@ -433,7 +432,6 @@ bool AscendKernelRuntime::GenTask(const session::KernelGraph *graph) {
  assign_instance.GetWaitStreams(&wait_active_stream_list);
  std::vector<uint32_t> force_copy_stream_list;
  assign_instance.GetHcomStreams(&force_copy_stream_list);

  MS_LOG(INFO) << "call DavinciModel total stream num:" << resource_manager.get_cur_stream_num()
               << ", total event num:" << resource_manager.get_cur_event_num()
               << ", total label num:" << label_assign_instance.GetLabelNum(NOT_NULL(graph))
@@ -444,7 +442,6 @@ bool AscendKernelRuntime::GenTask(const session::KernelGraph *graph) {
    task_info_list, empty_list, empty_list, empty_list, empty_list, wait_active_stream_list, force_copy_stream_list, 0,
    0, 0, 0, 0, 0, resource_manager.get_cur_stream_num(), label_assign_instance.GetLabelNum(NOT_NULL(graph)),
    resource_manager.get_cur_event_num(), 0);

  auto ret = graph_model_map_.insert(std::make_pair(graph->graph_id(), model));
  if (!ret.second) {
    MS_LOG(EXCEPTION) << "Duplicate GraphId! Please check in ascend_session.";
@@ -625,6 +622,10 @@ bool AscendKernelRuntime::HcclInit() {
      return false;
    }
  }
  if (strlen(config_path_str) > PATH_MAX) {
    MS_LOG(ERROR) << "file path oversize";
    return false;
  }
  std::string rank_id_str = GetRankId();
  auto full_path = realpath(config_path_str, nullptr);
  if (full_path == nullptr) {
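The added length check guards the realpath() call that follows it. As a rough standalone sketch of that pattern (ResolveConfigPath and its arguments are hypothetical, not code from this commit):

#include <limits.h>  // PATH_MAX
#include <stdlib.h>  // realpath (POSIX)
#include <cstdio>
#include <cstring>

// Reject an over-long path before resolving it, mirroring the guard in the hunk above.
bool ResolveConfigPath(const char *config_path_str, char *resolved) {
  if (config_path_str == nullptr || strlen(config_path_str) > PATH_MAX) {
    std::fprintf(stderr, "file path oversize\n");
    return false;
  }
  // `resolved` must point to a buffer of at least PATH_MAX bytes.
  return realpath(config_path_str, resolved) != nullptr;
}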
@@ -24,7 +24,6 @@
#include <map>
#include <unordered_map>
#include <unordered_set>

#include "common/utils.h"
#include "debug/anf_ir_dump.h"
#include "operator/ops.h"
@@ -13,7 +13,6 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "device/cpu/mpi/mpi_adapter.h"
#ifdef ENABLE_MPI
#include <algorithm>
@@ -262,9 +261,7 @@ bool MPIAdapter::AllGather(const float *input, float *output, const std::vector<
  if (comm == MPI_COMM_NULL) {
    RAISE_EXCEPTION_WITH_PARAM("create mpi comm fail! rankid:", rank_id_);
  }

  auto ret = MPI_Allgather(input, data_num, MPI_FLOAT, output, data_num, MPI_FLOAT, comm);

  if (ret != MPI_SUCCESS) {
    RAISE_EXCEPTION_WITH_PARAM("mpi allgater fail!ret = ", ret);
  }
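For reference, a minimal self-contained MPI_Allgather example with the same error-check shape as the hunk above (hypothetical program, not MindSpore code; assumes an MPI installation):

#include <mpi.h>
#include <cstdio>
#include <vector>

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);
  int rank = 0, size = 0;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  const int data_num = 4;                      // elements contributed per rank
  std::vector<float> input(data_num, static_cast<float>(rank));
  std::vector<float> output(data_num * size);  // receives every rank's block

  // Every rank sends data_num floats and receives data_num floats from each rank.
  int ret = MPI_Allgather(input.data(), data_num, MPI_FLOAT,
                          output.data(), data_num, MPI_FLOAT, MPI_COMM_WORLD);
  if (ret != MPI_SUCCESS) {
    std::fprintf(stderr, "MPI_Allgather failed, ret = %d\n", ret);
  }

  MPI_Finalize();
  return 0;
}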
@@ -56,9 +56,14 @@ KernelRuntime *KernelRuntimeManager::GetSingleKernelRuntime(const std::string &d
    auto cur_runtime_key = runtime_map_.begin()->first;
    auto find_pos = cur_runtime_key.rfind('_');
    if (find_pos != std::string::npos) {
      auto cur_device_id = cur_runtime_key.substr(find_pos + 1);
      MS_LOG(EXCEPTION) << "Can't change device id in runtime, already set device id: " << cur_device_id
                        << ", set device id: " << device_id << " failed";
      if (cur_runtime_key.size() > find_pos + 1) {
        auto cur_device_id = cur_runtime_key.substr(find_pos + 1);
        MS_LOG(EXCEPTION) << "Can't change device id in runtime, already set device id: " << cur_device_id
                          << ", set device id: " << device_id << " failed";
      } else {
        MS_LOG(EXCEPTION) << "Can't change device id in runtime, current runtime_key size error, set device id: "
                          << device_id << " failed";
      }
    }
  }
  return GetKernelRuntime(device_name, device_id);
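The added branch only calls substr when characters actually follow the last '_', so a runtime key that ends in the separator no longer produces an empty device id in the error message. A small standalone sketch of that split-and-guard pattern (DeviceIdFromRuntimeKey is a hypothetical helper, not MindSpore code):

#include <iostream>
#include <string>

std::string DeviceIdFromRuntimeKey(const std::string &runtime_key) {
  auto find_pos = runtime_key.rfind('_');
  if (find_pos == std::string::npos) {
    return "";  // no separator at all
  }
  if (runtime_key.size() > find_pos + 1) {
    return runtime_key.substr(find_pos + 1);  // e.g. "Ascend_0" -> "0"
  }
  return "";  // key ends with '_', so there is no device id to report
}

int main() {
  std::cout << DeviceIdFromRuntimeKey("Ascend_0") << '\n';  // prints 0
  std::cout << DeviceIdFromRuntimeKey("Ascend_") << '\n';   // prints an empty line
  return 0;
}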
@@ -721,19 +721,16 @@ std::vector<std::pair<AnfNodePtr, size_t>> GetOutputIndex(const std::vector<AnfN
    MS_EXCEPTION_IF_NULL(output);
    bool found = false;
    auto pree_node = AnfAlgo::VisitKernel(output, 0);

    auto pos = std::find(std::begin(node_list), std::end(node_list), pree_node.first);
    if (pos != std::end(node_list)) {
      output_index.push_back(pree_node);
      continue;
    }

    auto ret = std::find(std::begin(input_list), std::end(input_list), pree_node.first);
    if (ret != std::end(input_list)) {
      output_index.push_back(std::make_pair(pree_node.first, 0));
      found = true;
    }

    if (!found) {
      MS_EXCEPTION(ArgumentError) << "Output [" << i << "][" << output->DebugString(2) << "] of ["
                                  << output->func_graph()->ToString() << "] found no related kernel info.";
@@ -744,18 +741,14 @@ std::vector<std::pair<AnfNodePtr, size_t>> GetOutputIndex(const std::vector<AnfN

void GetValidKernelNodes(const FuncGraphPtr &func_graph, std::vector<AnfNodePtr> *node_list) {
  MS_EXCEPTION_IF_NULL(node_list);

  MS_EXCEPTION_IF_NULL(func_graph);

  std::vector<AnfNodePtr> node_lists = TopoSort(func_graph->get_return());
  for (auto const &node : node_lists) {
    if (!AnfAlgo::IsRealKernel(node) || !node->isa<CNode>()) {
      continue;
    }

    auto cnode = node->cast<CNodePtr>();
    MS_EXCEPTION_IF_NULL(cnode);

    if (IsValueNode<Primitive>(cnode->input(kAnfPrimitiveIndex))) {
      node_list->push_back(node);
    }
@@ -115,9 +115,9 @@ bool EmbeddingLookUpCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inp
  return true;
}

void LookUpTable_task(const float *input_addr, float *output_addr, int *indices_addr, size_t indices_lens, size_t num,
                      size_t dim0, size_t dim1, size_t dim2, int offset, size_t axis, std::vector<size_t> input_shape,
                      size_t input_lens) {
void LookUpTable_task(const float *input_addr, float *output_addr, const int *indices_addr, size_t indices_lens,
                      size_t num, size_t dim0, size_t dim1, size_t dim2, int offset, size_t axis,
                      std::vector<size_t> input_shape, size_t input_lens) {
  size_t lens = num * sizeof(float);
  for (size_t i = 0; i < indices_lens; ++i) {
    int indices = indices_addr[i] - offset;
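The substantive change in this signature is the const qualifier on indices_addr; the parameters are merely rewrapped across lines. The lookup only reads the indices, so taking them through a const pointer documents that intent. A tiny illustration of such a const-qualified parameter (hypothetical function, not MindSpore code):

#include <cstddef>
#include <cstdio>

// Indices are read-only here, so the pointer parameter is const-qualified.
void PrintAdjustedIndices(const int *indices_addr, size_t indices_lens, int offset) {
  for (size_t i = 0; i < indices_lens; ++i) {
    std::printf("index %zu -> %d\n", i, indices_addr[i] - offset);
  }
}

int main() {
  const int indices[] = {3, 5, 7};
  PrintAdjustedIndices(indices, sizeof(indices) / sizeof(indices[0]), 1);
  return 0;
}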
@@ -134,7 +134,6 @@ void LookUpTable_task(const float *input_addr, float *output_addr, int *indices_
    } else if (axis == 0) {
      pos = CPUKernelUtils::CalcOffset(input_shape, index, 0, 0, 0);
    }

    if (pos + num <= input_lens) {
      auto ret = memcpy_s(output_addr, lens, input_addr + pos, lens);
      if (ret != EOK) {
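A hedged sketch of the same bounds check before the copy (standalone buffers and std::memcpy instead of memcpy_s, so this is an approximation rather than the kernel's exact code):

#include <cstdio>
#include <cstring>
#include <vector>

int main() {
  std::vector<float> input(16, 1.0f);
  std::vector<float> output(4, 0.0f);
  size_t pos = 12;                  // start of the row to copy
  size_t num = 4;                   // elements per row
  size_t lens = num * sizeof(float);

  if (pos + num <= input.size()) {  // same guard as `pos + num <= input_lens`
    std::memcpy(output.data(), input.data() + pos, lens);
  } else {
    std::fprintf(stderr, "copy window out of range\n");
  }
  return 0;
}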
@@ -55,7 +55,6 @@ void ReduceCPUKernel::InitKernel(const CNodePtr &kernel_node) {
    }
  } else if (axis_addr->isa<Int32Imm>()) {
    int axis = AnfAlgo::GetNodeAttr<int>(kernel_node, AXIS);

    if (axis >= 0 && IntToSize(axis) >= shape_.size()) {
      MS_LOG(EXCEPTION) << "axis value is oversize.";
    }
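The check above rejects a non-negative axis that is not smaller than the tensor rank. A compact standalone version of that validation (CheckAxis is a hypothetical helper, not MindSpore code):

#include <cstddef>
#include <stdexcept>
#include <vector>

// A non-negative axis must index an existing dimension of the shape.
void CheckAxis(int axis, const std::vector<size_t> &shape) {
  if (axis >= 0 && static_cast<size_t>(axis) >= shape.size()) {
    throw std::out_of_range("axis value is oversize.");
  }
}

int main() {
  CheckAxis(1, {4, 3});  // fine: axis 1 < rank 2
  // CheckAxis(2, {4, 3}) would throw: axis 2 >= rank 2
  return 0;
}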
@@ -46,7 +46,6 @@ MS_REG_CPU_KERNEL(ReduceMax, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOu
                  ReduceCPUKernel);
MS_REG_CPU_KERNEL(ReduceSum, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
                  ReduceCPUKernel);

}  // namespace kernel
}  // namespace mindspore
#endif  // MINDSPORE_CCSRC_KERNEL_CPU_REDUCE_CPU_KERNEL_H_