!17313 fix static check

From: @jiang-shuqiang
Reviewed-by: @yelihua, @ouwenchang
Signed-off-by: @ouwenchang
Commit: c77e7de1dc
@@ -235,9 +235,8 @@ bool Debugger::CheckDebuggerDumpEnabled() const {
 bool Debugger::CheckDebuggerEnabled() const {
   // get env variables to configure debugger
-  const char *env_enable_char = std::getenv("ENABLE_MS_DEBUGGER");
-  if (env_enable_char != nullptr) {
-    std::string env_enable_str = env_enable_char;
+  std::string env_enable_str = common::GetEnv("ENABLE_MS_DEBUGGER");
+  if (!env_enable_str.empty()) {
     (void)std::transform(env_enable_str.begin(), env_enable_str.end(), env_enable_str.begin(), ::tolower);
     if ((env_enable_str == "1" || env_enable_str == "true") && device_target_ != kCPUDevice) {
       return true;
@@ -248,9 +247,8 @@ bool Debugger::CheckDebuggerEnabled() const {
 void Debugger::CheckDebuggerEnabledParam() const {
   // check the value of env variable ENABLE_MS_DEBUGGER
-  const char *env_enable_char = std::getenv("ENABLE_MS_DEBUGGER");
-  if (env_enable_char != nullptr) {
-    std::string env_enable_str = env_enable_char;
+  std::string env_enable_str = common::GetEnv("ENABLE_MS_DEBUGGER");
+  if (!env_enable_str.empty()) {
     (void)std::transform(env_enable_str.begin(), env_enable_str.end(), env_enable_str.begin(), ::tolower);
     if (env_enable_str != "0" && env_enable_str != "1" && env_enable_str != "false" && env_enable_str != "true") {
       MS_LOG(WARNING) << "Env variable ENABLE_MS_DEBUGGER should be True/False/1/0 (case insensitive), but get: "
@@ -260,10 +258,10 @@ void Debugger::CheckDebuggerEnabledParam() const {
 }

 bool Debugger::CheckDebuggerPartialMemoryEnabled() const {
-  const char *env_partial_mem_str = std::getenv("MS_DEBUGGER_PARTIAL_MEM");
-  if (env_partial_mem_str != nullptr) {
+  std::string env_partial_mem_str = common::GetEnv("MS_DEBUGGER_PARTIAL_MEM");
+  if (!env_partial_mem_str.empty()) {
     MS_LOG(INFO) << "Getenv MS_DEBUGGER_PARTIAL_MEM: " << env_partial_mem_str;
-    if (std::strcmp(env_partial_mem_str, "1") == 0) {
+    if (env_partial_mem_str == "1") {
       return true;
     }
   }
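All three checks above move from raw std::getenv to common::GetEnv, which returns a std::string and makes the unset/empty case explicit. A minimal sketch of the shared flag semantics, written in Python for brevity (the helper name is illustrative, not MindSpore API): an unset or empty variable means disabled, and matching is case-insensitive.

import os

def env_flag_enabled(name: str) -> bool:
    # Empty string stands in for "unset", mirroring the !env_enable_str.empty() check.
    value = os.getenv(name, "").lower()
    return value in ("1", "true")

# e.g. env_flag_enabled("ENABLE_MS_DEBUGGER")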
@@ -751,7 +749,7 @@ void Debugger::ProcessKViewCMD(const EventReply &reply) {
     MS_LOG(INFO) << "tensor dtype: " << tensor.data_type();
   }
   EventReply send_tensors_reply = grpc_client_->SendTensors(tensors);
-  if (send_tensors_reply.status() != send_tensors_reply.OK) {
+  if (send_tensors_reply.status() != debugger::EventReply::OK) {
     MS_LOG(ERROR) << "Error: SendTensors failed";
   }
 }
@@ -1052,7 +1050,7 @@ void Debugger::SetStepNum(int32_t cur_num_step) {
 int32_t Debugger::step_num() const { return num_step_; }

-uint64_t BytestoInt64(const std::vector<char> &buffer) {
+uint64_t BytestoUInt64(const std::vector<char> &buffer) {
   uint64_t ret = (uint64_t)buffer[0];
   const int SHIFT = 8;
   const int MAX_INDEX = 8;
@@ -1092,16 +1090,18 @@ std::vector<std::string> Debugger::CheckOpOverflow() {
       continue;
     }
     MS_LOG(INFO) << "Open overflow bin file " << file_name;
-    const uint32_t offset = 313;
+    const uint32_t offset = 321;
     infile.seekg(offset, std::ios::beg);
     std::vector<char> buffer;
     const size_t buf_size = 256;
     buffer.resize(buf_size);
     infile.read(buffer.data(), buf_size);
-    const uint8_t stream_id_offset = 8;
-    const uint8_t task_id_offset = 16;
-    uint64_t stream_id = BytestoInt64(std::vector<char>(buffer.begin() + stream_id_offset, buffer.end()));
-    uint64_t task_id = BytestoInt64(std::vector<char>(buffer.begin() + task_id_offset, buffer.end()));
+    const uint8_t stream_id_offset = 16;
+    const uint8_t task_id_offset = 24;
+    // The stream_id and task_id in the dump file are 8 byte fields for extensibility purpose, but only hold 4
+    // byte values currently.
+    uint64_t stream_id = BytestoUInt64(std::vector<char>(buffer.begin() + stream_id_offset, buffer.end()));
+    uint64_t task_id = BytestoUInt64(std::vector<char>(buffer.begin() + task_id_offset, buffer.end()));
     MS_LOG(INFO) << "Overflow stream_id " << stream_id << ", task_id " << task_id << ".";
     auto op = debugger_->stream_task_to_opname_.find(std::make_pair(stream_id, task_id));
     if (op != debugger_->stream_task_to_opname_.end()) {
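For reference, a sketch of the decode path after this change (Python, not MindSpore code): skip the 321-byte preamble, read a 256-byte header block, then pull the 8-byte stream_id and task_id fields at offsets 16 and 24. Little-endian order is assumed here, matching the byte-by-byte shift loop in BytestoUInt64.

def parse_overflow_header(path):
    with open(path, "rb") as infile:
        infile.seek(321)           # const uint32_t offset = 321
        buffer = infile.read(256)  # const size_t buf_size = 256
    # 8-byte fields at offsets 16 and 24; only 4 bytes hold meaningful values today.
    stream_id = int.from_bytes(buffer[16:24], "little")
    task_id = int.from_bytes(buffer[24:32], "little")
    return stream_id, task_id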
@@ -1146,11 +1146,12 @@ bool Debugger::CheckPort(const std::string &port) const {
   int num = 0;
   const int min_port_num = 1;
   const int max_port_num = 65535;
+  const int decimal = 10;
   if (port[0] == '0' && port[1] != '\0') return false;
   int i = 0;
   while (port[i] != '\0') {
     if (port[i] < '0' || port[i] > '9') return false;
-    num = num * 10 + (port[i] - '0');
+    num = num * decimal + (port[i] - '0');
     if (num > max_port_num) return false;
     i++;
   }
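The loop is a manual decimal parse with an overflow guard; only the literal 10 gains a name here. The same validation expressed compactly in Python (a sketch, assuming min_port_num/max_port_num keep their meaning of an inclusive 1-65535 range):

def check_port(port: str) -> bool:
    if not port.isdigit():                # every char must be '0'..'9'
        return False
    if port[0] == "0" and len(port) > 1:  # no leading zeros
        return False
    return 1 <= int(port) <= 65535        # min_port_num .. max_port_num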
@@ -286,6 +286,6 @@ bool GetMiVersionMatched(const EventReply &reply);
 // get the full name of a tensor, which is the name used in TensorLoader
 std::string GetTensorFullName(const TensorProto &tensor);

-uint64_t BytestoInt64(const std::vector<char> &buffer);
+uint64_t BytestoUInt64(const std::vector<char> &buffer);
 }  // namespace mindspore
 #endif  // MINDSPORE_CCSRC_DEBUG_DEBUGGER_DEBUGGER_H_
@@ -46,7 +46,7 @@ KernelRuntime::~KernelRuntime() {}
 bool KernelRuntime::Load(session::KernelGraph *graph, bool is_task_sink) { return true; }

-bool KernelRuntime::LoadData(session::KernelGraph *graph) { return false; }
+bool KernelRuntime::LoadData(session::KernelGraph *) { return false; }

 bool KernelRuntime::NodeOutputDeviceAddressExist(const AnfNodePtr &kernel, size_t index) {
   MS_EXCEPTION_IF_NULL(kernel);
@@ -132,11 +132,6 @@ bool EventWriter::Shut() noexcept {
   return result;
 }

-// Summary Record Format:
-// 1 uint64 : data length
-// 2 uint32 : mask crc value of data length
-// 3 bytes : data
-// 4 uint32 : mask crc value of data
 bool EventWriter::WriteRecord(const std::string &data) {
   if (event_file_ == nullptr) {
     MS_LOG(ERROR) << "Writer not initialized or previously closed.";
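The removed comment documented the record framing that WriteRecord still produces: a uint64 data length, a masked CRC32 of that length, the data bytes, then a masked CRC32 of the data. A sketch of that layout in Python (little-endian order and the masked_crc32 helper are assumptions; neither is shown in this diff):

import struct

def frame_record(data: bytes, masked_crc32) -> bytes:
    header = struct.pack("<Q", len(data))               # 1 uint64 : data length
    return (header
            + struct.pack("<I", masked_crc32(header))   # 2 uint32 : mask crc value of data length
            + data                                      # 3 bytes  : data
            + struct.pack("<I", masked_crc32(data)))    # 4 uint32 : mask crc value of data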
@@ -275,7 +275,7 @@ class ImageClassificationRunner:
         print("Start running and writing......")
         begin = time()

-        self._summary_timestamp = self._extract_timestamp(summary.event_file_name)
+        self._summary_timestamp = self._extract_timestamp(summary.file_info['file_name'])
         if self._summary_timestamp is None:
             raise RuntimeError("Cannot extract timestamp from summary filename!"
                                " It should contains a timestamp after 'summary.' .")
@@ -13,6 +13,7 @@
 # limitations under the License.
 # ============================================================================
 """Generate the summary event which conform to proto format."""
+import io
 import platform
 import time
@@ -285,8 +286,7 @@ def _fill_histogram_summary(tag: str, np_value: np.ndarray, summary) -> None:
     invalids = []
     for isfn in np.isnan, np.isposinf, np.isneginf:
         if total - valid > sum(invalids):
-            count = np.count_nonzero(isfn(np_value))
-            invalids.append(count)
+            invalids.append(np.count_nonzero(isfn(np_value)))
         else:
             invalids.append(0)
@@ -308,21 +308,33 @@ def _fill_histogram_summary(tag: str, np_value: np.ndarray, summary) -> None:
     summary.min = ma_value.min()
     summary.max = ma_value.max()
     summary.sum = ma_value.sum(dtype=np.float64)
-    bins = _calc_histogram_bins(valid)
-    first_edge, last_edge = summary.min, summary.max
-
-    if not first_edge < last_edge:
-        first_edge -= 0.5
-        last_edge += 0.5
-
-    bins = np.linspace(first_edge, last_edge, bins + 1, dtype=np_value.dtype)
-    hists, edges = np.histogram(np_value, bins=bins)
-
-    for hist, edge1, edge2 in zip(hists, edges, edges[1:]):
-        bucket = summary.buckets.add()
-        bucket.width = edge2 - edge1
-        bucket.count = hist
-        bucket.left = edge1
+    _fill_bucket(valid, np_value, summary)
+
+
+def _fill_bucket(valid, np_value, summary):
+    """
+    Fill the bucket.
+
+    Args:
+        valid (int): The count of valid data.
+        np_value (np.ndarray): Summary data.
+        summary (summary_pb2.Summary.Histogram): Summary histogram data.
+    """
+    bins = _calc_histogram_bins(valid)
+    first_edge, last_edge = summary.min, summary.max
+
+    if not first_edge < last_edge:
+        first_edge -= 0.5
+        last_edge += 0.5
+
+    bins = np.linspace(first_edge, last_edge, bins + 1, dtype=np_value.dtype)
+    hists, edges = np.histogram(np_value, bins=bins)
+
+    for hist, edge1, edge2 in zip(hists, edges, edges[1:]):
+        bucket = summary.buckets.add()
+        bucket.width = edge2 - edge1
+        bucket.count = hist
+        bucket.left = edge1


 def _fill_image_summary(tag: str, np_value, summary_image, input_format='NCHW'):
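A quick worked example of the degenerate-range branch in the extracted _fill_bucket: when min == max, both edges are widened by 0.5 so np.histogram still receives buckets of non-zero width.

import numpy as np

values = np.array([3.0, 3.0, 3.0])
first_edge, last_edge = values.min(), values.max()  # both 3.0
if not first_edge < last_edge:
    first_edge -= 0.5
    last_edge += 0.5
hists, edges = np.histogram(values, bins=np.linspace(first_edge, last_edge, 4))
print(hists)  # [0 3 0] -- every value lands in the middle bucket around 3.0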
@@ -386,7 +398,6 @@ def _make_image(tensor, rescale=1):
     scaled_width = int(width * rescale)
     image = Image.fromarray(tensor)
     image = image.resize((scaled_width, scaled_height), Image.ANTIALIAS)
-    import io
     output = io.BytesIO()
     image.save(output, format='PNG')
     image_string = output.getvalue()
@@ -145,13 +145,20 @@ class SummaryRecord:
     def __init__(self, log_dir, file_prefix="events", file_suffix="_MS",
                  network=None, max_file_size=None, raise_exception=False, export_options=None):

-        self._closed, self._event_writer = False, None
+        self._event_writer = None
         self._mode, self._data_pool = 'train', defaultdict(list)
+        self._status = {
+            'closed': False,
+            'has_graph': False
+        }
+        self.file_info = {
+            'file_name': None,
+            'file_path': None
+        }
         Validator.check_str_by_regular(file_prefix)
         Validator.check_str_by_regular(file_suffix)

-        self.log_path = _make_directory(log_dir)
+        log_path = _make_directory(log_dir)

         if not isinstance(max_file_size, (int, type(None))):
             raise TypeError("The 'max_file_size' should be int type.")
@@ -165,24 +172,21 @@ class SummaryRecord:
         Validator.check_value_type(arg_name='raise_exception', arg_value=raise_exception, valid_types=bool)

-        self.prefix = file_prefix
-        self.suffix = file_suffix
         self.network = network
-        self.has_graph = False

         time_second = str(int(time.time()))
         # create the summary writer file
-        self.event_file_name = get_event_file_name(self.prefix, self.suffix, time_second)
-        self.full_file_name = os.path.join(self.log_path, self.event_file_name)
+        self.file_info['file_name'] = get_event_file_name(file_prefix, file_suffix, time_second)
+        self.file_info['file_path'] = os.path.join(log_path, self.file_info.get('file_name'))

         self._export_options = process_export_options(export_options)
         export_dir = ''
         if self._export_options is not None:
             export_dir = "export_{}".format(time_second)

-        filename_dict = dict(summary=self.event_file_name,
-                             lineage=get_event_file_name(self.prefix, '_lineage', time_second),
-                             explainer=get_event_file_name(self.prefix, '_explain', time_second),
+        filename_dict = dict(summary=self.file_info.get('file_name'),
+                             lineage=get_event_file_name(file_prefix, '_lineage', time_second),
+                             explainer=get_event_file_name(file_prefix, '_explain', time_second),
                              exporter=export_dir)
         self._event_writer = WriterPool(log_dir,
                                         max_file_size,
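A usage sketch of the reshaped state (hypothetical calls, for illustration only): callers that previously read the public event_file_name/full_file_name attributes now go through the file_info dict, while the closed/has_graph flags move into the private _status dict used throughout the methods below.

record = SummaryRecord(log_dir="./summary_dir")
print(record.file_info.get('file_name'))  # set from get_event_file_name(...)
print(record.file_info.get('file_path'))  # log_path joined with the file name
record.close()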
@@ -193,7 +197,7 @@ class SummaryRecord:
     def __enter__(self):
         """Enter the context manager."""
-        if self._closed:
+        if self._status.get('closed'):
             raise ValueError('SummaryRecord has been closed.')
         return self
@@ -314,11 +318,11 @@ class SummaryRecord:
         Validator.check_value_type(arg_name='step', arg_value=step, valid_types=int)
         Validator.check_value_type(arg_name='train_network', arg_value=train_network, valid_types=[Cell, type(None)])

-        if self._closed:
+        if self._status.get('closed'):
             logger.error("The record writer is closed.")
             return False
         # Set the current summary of train step
-        if self.network is not None and not self.has_graph:
+        if self.network is not None and not self._status.get('has_graph'):
             graph_proto = self.network.get_func_graph_proto()
             if graph_proto is None and train_network is not None:
                 graph_proto = train_network.get_func_graph_proto()
@@ -326,7 +330,7 @@ class SummaryRecord:
                 logger.error("Failed to get proto for graph")
             else:
                 self._event_writer.write({'graph': [{'step': step, 'value': graph_proto}]})
-                self.has_graph = True
+                self._status['has_graph'] = True
         if not _summary_tensor_cache:
             return True
@@ -377,7 +381,7 @@ class SummaryRecord:
             ... with SummaryRecord(log_dir="./summary_dir", file_prefix="xx_", file_suffix="_yy") as summary_record:
             ...     log_dir = summary_record.log_dir
         """
-        return self.full_file_name
+        return self.file_info['file_path']

     def flush(self):
         """
@@ -391,7 +395,7 @@ class SummaryRecord:
             ... with SummaryRecord(log_dir="./summary_dir", file_prefix="xx_", file_suffix="_yy") as summary_record:
             ...     summary_record.flush()
         """
-        if self._closed:
+        if self._status.get('closed'):
             logger.error("The record writer is closed and can not flush.")
         elif self._event_writer:
             self._event_writer.flush()
@@ -408,13 +412,13 @@ class SummaryRecord:
             ... finally:
             ...     summary_record.close()
         """
-        if not self._closed and self._event_writer:
+        if not self._status.get('closed') and self._event_writer:
             # event writer flush and close
             logger.info('Please wait it may take quite some time to finish writing and closing.')
             atexit.unregister(self.close)
             self._event_writer.close()
             self._event_writer.join()
-            self._closed = True
+            self._status['closed'] = True

     @staticmethod
     def _parse_from(name: str = None):
@@ -140,17 +140,16 @@ class ExportWriter(BaseWriter):
         }

         if export_option in options:
-            options[export_option](data, self._filepath, self._max_file_size)
+            options[export_option](data, self._filepath)

     @staticmethod
-    def _export_npy(data, export_dir, max_file_size):
+    def _export_npy(data, export_dir):
         """
         export the tensor data as npy.

         Args:
             data (dict): Export data info.
             export_dir (str): The path of export dir.
-            max_file_size (Optional[int]): The maximum size in bytes of each file that can be written to the disk.
         """
         tag = quote(data.get('tag'), safe="")
         step = int(data.get('step'))
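A minimal sketch of what an exporter with the new two-argument signature might do (the file-naming scheme below is a guess for illustration, not taken from this diff): URL-quote the tag as the visible lines do, then write the tensor with np.save under the export directory.

import os
from urllib.parse import quote
import numpy as np

def export_npy(data, export_dir):
    tag = quote(data.get('tag'), safe="")   # make the tag filesystem-safe
    step = int(data.get('step'))
    np.save(os.path.join(export_dir, "{}_{}.npy".format(tag, step)), data.get('value'))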
@@ -390,7 +390,7 @@ def test_summary():
     with SummaryRecord(tmp_dir) as test_writer:
         train_summary_record(test_writer, steps=steps)

-        file_name = os.path.realpath(test_writer.full_file_name)
+        file_name = os.path.realpath(test_writer.log_dir)
         with SummaryReader(file_name) as summary_writer:
             for _ in range(steps):
                 event = summary_writer.read_event()
@@ -73,7 +73,7 @@ def test_summary_step2_summary_record1():
     with SummaryRecord(tmp_dir) as test_writer:
         train_summary_record(test_writer, steps=steps)

-        file_name = os.path.realpath(test_writer.full_file_name)
+        file_name = os.path.realpath(test_writer.log_dir)
         with SummaryReader(file_name) as summary_writer:
             for _ in range(steps):
                 event = summary_writer.read_event()
@@ -55,8 +55,7 @@ def test_histogram_summary():
         test_data = _wrap_test_data(Tensor([[1, 2, 3], [4, 5, 6]]))
         _cache_summary_tensor_data(test_data)
         test_writer.record(step=1)

-        file_name = os.path.join(tmp_dir, test_writer.event_file_name)
+        file_name = os.path.realpath(test_writer.log_dir)
         with SummaryReader(file_name) as reader:
             event = reader.read_event()
             assert event.summary.value[0].histogram.count == 6
@@ -78,7 +77,7 @@ def test_histogram_multi_summary():
         _cache_summary_tensor_data(test_data)
         test_writer.record(step=i)

-        file_name = os.path.join(tmp_dir, test_writer.event_file_name)
+        file_name = os.path.realpath(test_writer.log_dir)
         with SummaryReader(file_name) as reader:
             for _ in range(num_step):
                 event = reader.read_event()
@@ -92,7 +91,7 @@ def test_histogram_summary_empty_tensor():
         _cache_summary_tensor_data(test_data)
         test_writer.record(step=1)

-        file_name = os.path.join(tmp_dir, test_writer.event_file_name)
+        file_name = os.path.realpath(test_writer.log_dir)
         with SummaryReader(file_name) as reader:
             event = reader.read_event()
             assert event.summary.value[0].histogram.count == 0
@@ -109,7 +108,7 @@ def test_histogram_summary_same_value():
         _cache_summary_tensor_data(test_data)
         test_writer.record(step=1)

-        file_name = os.path.join(tmp_dir, test_writer.event_file_name)
+        file_name = os.path.realpath(test_writer.log_dir)
         with SummaryReader(file_name) as reader:
             event = reader.read_event()
             LOG.debug(event)
@@ -129,7 +128,7 @@ def test_histogram_summary_high_dims():
         _cache_summary_tensor_data(test_data)
         test_writer.record(step=1)

-        file_name = os.path.join(tmp_dir, test_writer.event_file_name)
+        file_name = os.path.realpath(test_writer.log_dir)
         with SummaryReader(file_name) as reader:
             event = reader.read_event()
             LOG.debug(event)
@@ -153,7 +152,7 @@ def test_histogram_summary_nan_inf():
         _cache_summary_tensor_data(test_data)
         test_writer.record(step=1)

-        file_name = os.path.join(tmp_dir, test_writer.event_file_name)
+        file_name = os.path.realpath(test_writer.log_dir)
         with SummaryReader(file_name) as reader:
             event = reader.read_event()
             LOG.debug(event)
@@ -169,7 +168,7 @@ def test_histogram_summary_all_nan_inf():
         _cache_summary_tensor_data(test_data)
         test_writer.record(step=1)

-        file_name = os.path.join(tmp_dir, test_writer.event_file_name)
+        file_name = os.path.realpath(test_writer.log_dir)
         with SummaryReader(file_name) as reader:
             event = reader.read_event()
             LOG.debug(event)