!15552 use host shape instead of device shape for debugger

From: @john_tzanakakis
Reviewed-by: @yelihua, @pandoublefeng
Signed-off-by: @pandoublefeng
Committed by mindspore-ci-bot on 2021-04-23 21:32:59 +08:00 (via Gitee)
Commit: a000f39764
3 changed files with 5 additions and 16 deletions
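
The change replaces the device-shape lookup plus an element-wise size_t-to-int conversion with a single call that returns the host-side (runtime padding) shape. Below is a minimal stand-alone sketch of that pattern; GetOutputDeviceShape and GetRuntimePaddingShape here are hypothetical stand-ins for the MindSpore helpers AnfAlgo::GetOutputDeviceShape and trans::GetRuntimePaddingShape, not the real APIs.

// Sketch only: stand-in helpers, not the MindSpore implementation.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <iterator>
#include <vector>

using ShapeVector = std::vector<int64_t>;

// Stand-in for AnfAlgo::GetOutputDeviceShape: device shape as unsigned dims.
std::vector<size_t> GetOutputDeviceShape() { return {32, 3, 224, 224}; }

// Stand-in for trans::GetRuntimePaddingShape: host shape already as ShapeVector.
ShapeVector GetRuntimePaddingShape() { return {32, 3, 224, 224}; }

int main() {
  // Old pattern: fetch the device shape, then convert element by element.
  ShapeVector old_shapes;
  auto device_shape = GetOutputDeviceShape();
  (void)std::transform(device_shape.begin(), device_shape.end(), std::back_inserter(old_shapes),
                       [](size_t dim) { return static_cast<int64_t>(dim); });

  // New pattern: take the host shape directly, no conversion loop.
  ShapeVector new_shapes = GetRuntimePaddingShape();

  std::cout << "old dims: " << old_shapes.size() << ", new dims: " << new_shapes.size() << '\n';
  return 0;
}

Besides removing the four-line conversion at each call site, this keeps the shapes the debugger reports in the host layout rather than the device layout, which matches the commit title's intent.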

File 1 of 3:

@@ -1157,10 +1157,7 @@ void Debugger::LoadSingleAnfnode(const AnfNodePtr &anf_node, const size_t output
   }
   auto format = kOpFormat_DEFAULT;
   string tensor_name = node_name + ':' + "0";
-  ShapeVector int_shapes;
-  auto shape = AnfAlgo::GetOutputDeviceShape(anf_node, output_index);
-  (void)std::transform(shape.begin(), shape.end(), std::back_inserter(int_shapes),
-                       [](size_t inner_item) { return SizeToInt(inner_item); });
+  ShapeVector int_shapes = trans::GetRuntimePaddingShape(anf_node, output_index);
   bool keep_prev;
   if (anf_node->isa<Parameter>()) {
     keep_prev = true;
@@ -1222,10 +1219,7 @@ void Debugger::LoadGraphOutputs() {
   }
   auto format = kOpFormat_DEFAULT;
   string tensor_name = kernel_name + ':' + std::to_string(j);
-  ShapeVector int_shapes;
-  auto shape = AnfAlgo::GetOutputDeviceShape(node, j);
-  (void)std::transform(shape.begin(), shape.end(), std::back_inserter(int_shapes),
-                       [](size_t inner_item) { return SizeToInt(inner_item); });
+  ShapeVector int_shapes = trans::GetRuntimePaddingShape(node, j);
   auto ret = addr->LoadMemToHost(tensor_name, exec_order, format, int_shapes, type, j, false);
   if (!ret) {
     MS_LOG(ERROR) << "LoadMemToHost:"

File 2 of 3:

@@ -25,6 +25,7 @@
 #include "backend/session/kernel_graph.h"
 #include "debug/debugger/grpc_client.h"
 #include "debug/debug_services.h"
+#include "common/trans.h"
 using debugger::Chunk;
 using debugger::DataType;

File 3 of 3:

@@ -157,10 +157,7 @@ void LoadKernelData(Debugger *debugger, const CNodePtr &kernel,
   auto format = kOpFormat_DEFAULT;
   auto gpu_addr = std::make_unique<GPUDeviceAddress>(addr->addr, addr->size, format, type);
   string input_tensor_name = input_kernel_name + ':' + "0";
-  ShapeVector int_shapes;
-  auto shape = AnfAlgo::GetOutputDeviceShape(input_kernel, PARAMETER_OUTPUT_INDEX);
-  (void)std::transform(shape.begin(), shape.end(), std::back_inserter(int_shapes),
-                       [](size_t inner_item) { return SizeToInt(inner_item); });
+  ShapeVector int_shapes = trans::GetRuntimePaddingShape(input_kernel, PARAMETER_OUTPUT_INDEX);
   auto ret = gpu_addr->LoadMemToHost(input_tensor_name, exec_order, format, int_shapes, type, 0, true);
   if (!ret) {
     MS_LOG(ERROR) << "LoadMemToHost:"
@@ -187,10 +184,7 @@ void LoadKernelData(Debugger *debugger, const CNodePtr &kernel,
   auto format = kOpFormat_DEFAULT;
   auto gpu_addr = std::make_unique<GPUDeviceAddress>(addr->addr, addr->size, format, type);
   string tensor_name = kernel_name + ':' + std::to_string(j);
-  ShapeVector int_shapes;
-  auto shape = AnfAlgo::GetOutputDeviceShape(kernel, j);
-  (void)std::transform(shape.begin(), shape.end(), std::back_inserter(int_shapes),
-                       [](size_t inner_item) { return SizeToInt(inner_item); });
+  ShapeVector int_shapes = trans::GetRuntimePaddingShape(kernel, j);
   auto ret = gpu_addr->LoadMemToHost(tensor_name, exec_order, format, int_shapes, type, j, false);
   if (!ret) {
     MS_LOG(ERROR) << "LoadMemToHost:"