Fix kernel-by-kernel and MindRT parameter bug

Parastoo Ashtari 2022-03-10 16:16:15 -05:00
parent e30446cca0
commit c61e56cbe2
7 changed files with 11 additions and 9 deletions


@@ -1574,7 +1574,8 @@ void Debugger::LoadSingleParameterMindRT(const AnfNodePtr &node) {
     debug_services_->MoveTensorCurrentToPrev(tensor_name);
   }
   // Keep_prev is True for parameters.
-  bool ret = device_addr->LoadMemToHost(tensor_name, 0, format, int_shapes, type, 0, true, root_graph_id);
+  // force update for parameters.
+  bool ret = device_addr->LoadMemToHost(tensor_name, 0, format, int_shapes, type, 0, true, root_graph_id, true);
   if (!ret) {
     MS_LOG(ERROR) << "LoadMemToHost:"

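For readability, here is the updated call annotated argument-by-argument; the parameter names are taken from the LoadMemToHost signature changes later in this commit (the annotated form is an editorial sketch, not part of the commit):

bool ret = device_addr->LoadMemToHost(tensor_name,    // tensor_name
                                      0,              // execution_order
                                      format,         // host_fmt (unnamed/ignored in the Ascend overload)
                                      int_shapes,     // host_shape
                                      type,           // host_type
                                      0,              // slot
                                      true,           // keep_prev: parameters keep their previous value
                                      root_graph_id,  // root_graph_id
                                      true);          // force_update: reload even if already cached this step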

@@ -155,7 +155,8 @@ bool CheckReadData(const CNodePtr &cnode) {
     if (dump_json_parser.NeedDump(kernel_name)) {
       read_data = true;
     }
-  } else if (debugger->debugger_enabled()) {
+  }
+  if (debugger->debugger_enabled()) {
     read_data = debugger->ReadNodeDataRequired(cnode);
   }
   return read_data;

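The `else if` above appears to be the kernel-by-kernel half of the bug: whenever the enclosing dump check was true but the kernel was not in the dump list, the debugger branch was never evaluated, so the debugger could miss data it had asked for. A minimal standalone sketch of the corrected control flow, with plain bools standing in for the enclosing dump check, DumpJsonParser::NeedDump, and the debugger_enabled/ReadNodeDataRequired pair (names in the sketch are hypothetical simplifications):

#include <iostream>

// Sketch of the fixed CheckReadData logic: the dump check and the
// debugger check are evaluated independently, not as if/else-if.
bool CheckReadDataSketch(bool dump_enabled, bool need_dump, bool debugger_wants_node) {
  bool read_data = false;
  if (dump_enabled && need_dump) {
    read_data = true;
  }
  // Previously `else if`: skipped whenever dump_enabled was true.
  if (debugger_wants_node) {
    read_data = true;
  }
  return read_data;
}

int main() {
  // Before the fix this combination produced false: dumping was enabled,
  // the kernel was not in the dump list, and the debugger branch never ran.
  std::cout << std::boolalpha << CheckReadDataSketch(true, false, true) << std::endl;  // true
}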

@@ -649,11 +649,11 @@ bool AscendDeviceAddress::DumpMemToFile(const std::string &filepath, const std::
  */
 bool AscendDeviceAddress::LoadMemToHost(const std::string &tensor_name, int execution_order, const std::string &,
                                         const ShapeVector &host_shape, TypeId host_type, size_t slot, bool keep_prev,
-                                        uint32_t root_graph_id) const {
+                                        uint32_t root_graph_id, bool force_update) const {
   bool ret = false;
   auto debugger = Debugger::GetInstance();
   MS_EXCEPTION_IF_NULL(debugger);
-  if (debugger->TensorExistsInCurrent(tensor_name)) {
+  if (debugger->TensorExistsInCurrent(tensor_name) && !force_update) {
     MS_LOG(INFO) << tensor_name << " already loaded for this step so not loading it again.";
     return true;
   }

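This hunk is the core of the MindRT parameter fix: LoadMemToHost used to return early whenever the tensor was already recorded for the current step, so a parameter loaded once was never refreshed. The new force_update flag bypasses that cache check. A self-contained sketch of the guard's semantics, with a std::set standing in for the debugger's per-step tensor bookkeeping (a simplification, not the real data structure):

#include <iostream>
#include <set>
#include <string>

// Hypothetical stand-in for the debugger's per-step tensor cache.
std::set<std::string> g_current_step_tensors;

// Sketch of the LoadMemToHost guard: tensors already seen this step are
// skipped unless force_update overrides the cache (as parameters now do).
bool LoadMemToHostSketch(const std::string &tensor_name, bool force_update) {
  if (g_current_step_tensors.count(tensor_name) != 0 && !force_update) {
    std::cout << tensor_name << " already loaded for this step so not loading it again.\n";
    return true;
  }
  g_current_step_tensors.insert(tensor_name);
  std::cout << "loading " << tensor_name << "\n";
  return true;
}

int main() {
  LoadMemToHostSketch("fc1.weight", true);     // parameter: loaded
  LoadMemToHostSketch("fc1.weight", true);     // parameter: reloaded despite the cache
  LoadMemToHostSketch("conv1.output", false);  // activation: loaded
  LoadMemToHostSketch("conv1.output", false);  // activation: skipped, already cached
}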

@@ -62,7 +62,7 @@ class AscendDeviceAddress : public DeviceAddress {
 #ifdef ENABLE_DEBUGGER
   bool LoadMemToHost(const std::string &tensor_name, int execution_order, const std::string &host_fmt,
                      const ShapeVector &host_shape, TypeId host_type, size_t slot, bool keep_prev,
-                     uint32_t root_graph_id = 0) const override;
+                     uint32_t root_graph_id = 0, bool force_update = false) const override;
 #endif
  private:


@@ -185,14 +185,14 @@ GPUDeviceAddress::~GPUDeviceAddress() { ClearDeviceMemory(); }
 #ifdef ENABLE_DEBUGGER
 bool GPUDeviceAddress::LoadMemToHost(const std::string &tensor_name, int execution_order, const std::string &host_fmt,
                                      const ShapeVector &host_shape, TypeId host_type, size_t slot, bool keep_prev,
-                                     uint32_t root_graph_id) const {
+                                     uint32_t root_graph_id, bool force_update) const {
   bool ret = false;
   if (size_ == 0) {
     return true;
   }
   MS_EXCEPTION_IF_NULL(Debugger::GetInstance());
-  if (Debugger::GetInstance()->TensorExistsInCurrent(tensor_name)) {
+  if (Debugger::GetInstance()->TensorExistsInCurrent(tensor_name) && !force_update) {
     MS_LOG(INFO) << tensor_name << " already loaded for this step so not loading it again.";
     return true;
   }


@@ -56,7 +56,7 @@ class GPUDeviceAddress : public DeviceAddress {
 #ifdef ENABLE_DEBUGGER
   bool LoadMemToHost(const std::string &tensor_name, int execution_order, const std::string &host_fmt,
                      const ShapeVector &host_shape, TypeId host_type, size_t slot, bool keep_prev,
-                     uint32_t root_graph_id = 0) const override;
+                     uint32_t root_graph_id = 0, bool force_update = false) const override;
 #endif
  private:


@@ -141,7 +141,7 @@ class DeviceAddress : public mindspore::DeviceSync {
 #ifdef ENABLE_DEBUGGER
   virtual bool LoadMemToHost(const std::string &tensor_name, int execution_order, const std::string &host_fmt,
                              const ShapeVector &host_shape, TypeId host_type, size_t slot, bool keep_prev,
                              uint32_t root_graph_id = 0, bool force_update = false) const {
     return true;
   }
 #endif
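A C++ detail worth noting about this base declaration: default arguments are bound by the static type of the call expression, so the force_update = false default declared here is the one that applies to calls made through a DeviceAddress pointer or reference, even though dispatch selects the Ascend or GPU override at run time. A small self-contained illustration (Base/Derived are illustrative stand-ins, not the real classes):

#include <iostream>

struct Base {
  // The default bound for calls through Base* / Base& lives here.
  virtual bool Load(bool force_update = false) const {
    std::cout << "Base::Load force_update=" << std::boolalpha << force_update << "\n";
    return true;
  }
  virtual ~Base() = default;
};

struct Derived : Base {
  bool Load(bool force_update) const override {
    std::cout << "Derived::Load force_update=" << std::boolalpha << force_update << "\n";
    return true;
  }
};

int main() {
  Derived d;
  const Base &b = d;
  b.Load();      // Base's default (false) applies; Derived::Load still runs
  d.Load(true);  // this sketch's Derived declares no default, so an argument is required here
}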