From 07d1f5ac37ff73929c220955f80a3f559419295f Mon Sep 17 00:00:00 2001 From: HuangBingjian Date: Tue, 8 Sep 2020 16:17:05 +0800 Subject: [PATCH] fix tensor.cc --- mindspore/ccsrc/utils/tensorprint_utils.cc | 14 +- mindspore/core/ir/tensor.cc | 138 ++++++++++++------ .../test_tensor_print/tensor_print_utils.py | 68 +++++++++ .../test_tensor_print/test_tensor_print.py | 74 ++++++++++ 4 files changed, 239 insertions(+), 55 deletions(-) create mode 100644 tests/st/ops/ascend/test_tensor_print/tensor_print_utils.py create mode 100644 tests/st/ops/ascend/test_tensor_print/test_tensor_print.py diff --git a/mindspore/ccsrc/utils/tensorprint_utils.cc b/mindspore/ccsrc/utils/tensorprint_utils.cc index 18116909ae7..5f52d5d56a4 100644 --- a/mindspore/ccsrc/utils/tensorprint_utils.cc +++ b/mindspore/ccsrc/utils/tensorprint_utils.cc @@ -103,14 +103,13 @@ template void PrintScalarToString(const char *str_data_ptr, const string &tensor_type, std::ostringstream *const buf) { MS_EXCEPTION_IF_NULL(str_data_ptr); MS_EXCEPTION_IF_NULL(buf); - *buf << "Tensor shape:[1] " << tensor_type; - *buf << "\nval:"; + *buf << "Tensor(shape=[1], dtype=" << GetParseType(tensor_type) << ", value="; const T *data_ptr = reinterpret_cast(str_data_ptr); if constexpr (std::is_same::value || std::is_same::value) { const int int_data = static_cast(*data_ptr); - *buf << int_data << "\n"; + *buf << int_data << ")\n"; } else { - *buf << *data_ptr << "\n"; + *buf << *data_ptr << ")\n"; } } @@ -118,12 +117,11 @@ void PrintScalarToBoolString(const char *str_data_ptr, const string &tensor_type MS_EXCEPTION_IF_NULL(str_data_ptr); MS_EXCEPTION_IF_NULL(buf); const bool *data_ptr = reinterpret_cast(str_data_ptr); - *buf << "Tensor shape:[1] " << tensor_type; - *buf << "\nval:"; + *buf << "Tensor(shape=[1], dtype=" << GetParseType(tensor_type) << ", value="; if (*data_ptr) { - *buf << "True\n"; + *buf << "True)\n"; } else { - *buf << "False\n"; + *buf << "False)\n"; } } diff --git a/mindspore/core/ir/tensor.cc 
b/mindspore/core/ir/tensor.cc index c4c380c1938..4681d5ba2b0 100644 --- a/mindspore/core/ir/tensor.cc +++ b/mindspore/core/ir/tensor.cc @@ -238,17 +238,49 @@ class TensorDataImpl : public TensorData { OutputDataString(ss, 0, 0, 1, false); return ss.str(); } + ssize_t cursor = 0; + num_width_ = GetMaxNumLength(shape); SummaryStringRecursive(ss, shape, &cursor, 0, use_comma); return ss.str(); } private: + int GetNumLength(const T &num) const { + T value = num; + int count = 0; + if (value <= 0) { // Return 1 when value is 0, or add the length of '-' when value < 0 + count++; + } + while (value != 0) { + value /= 10; + count++; + } + return count; + } + + int GetMaxNumLength(const ShapeVector &shape) const { + if constexpr (std::is_same::value) { + constexpr int bool_max_len = sizeof("False") - 1; + return bool_max_len; + } else if constexpr (std::is_same::value) { + return 11; // The placeholder of float16 is set to 11. + } else if (std::is_same::value || std::is_same::value) { + return 15; // The placeholder of float/double is set to 15. + } else { + T max_value = 0; + T min_value = 0; + ssize_t index = 0; + GetMaxMinValueRecursive(shape, &index, 0, &max_value, &min_value); + return std::max(GetNumLength(max_value), GetNumLength(min_value)); + } + } + void OutputDataString(std::ostringstream &ss, ssize_t cursor, ssize_t start, ssize_t end, bool use_comma) const { const bool isScalar = ndim_ == 0 && end - start == 1; + constexpr auto isBool = std::is_same::value; constexpr auto isFloat = std::is_same::value || std::is_same::value || std::is_same::value; - constexpr auto isBool = std::is_same::value; constexpr int linefeedThreshold = isFloat ? kThreshold1DFloat : (isBool ? 
kThreshold1DBool : kThreshold1DInt); for (ssize_t i = start; i < end && (cursor + i) < static_cast(data_size_); i++) { const auto value = data_[cursor + i]; @@ -256,52 +288,25 @@ class TensorDataImpl : public TensorData { if (isScalar) { ss << value; } else { - if constexpr (std::is_same::value) { - ss << std::setw(11) << std::setprecision(4) << std::setiosflags(std::ios::scientific | std::ios::right) - << value; - } else { - ss << std::setw(15) << std::setprecision(8) << std::setiosflags(std::ios::scientific | std::ios::right) - << value; - } + const int precision = std::is_same::value ? 4 : 8; + ss << std::setw(num_width_) << std::setprecision(precision) + << std::setiosflags(std::ios::scientific | std::ios::right) << value; } - } else if (std::is_same::value) { + } else if (isBool) { if (isScalar) { ss << (value ? "True" : "False"); } else { - ss << std::setw(5) << std::setiosflags(std::ios::right) << (value ? "True" : "False"); + ss << std::setw(num_width_) << std::setiosflags(std::ios::right) << (value ? "True" : "False"); } } else { - constexpr auto isSigned = std::is_same::value; - if constexpr (isSigned) { - if (!isScalar && static_cast(value) >= 0) { - ss << ' '; - } - } - - // Set width and indent for different int type with signed position. 
- // - // uint8 width: 3, [0, 255] - // int8 width: 4, [-128, 127] - // uint16 width: 5, [0, 65535] - // int16 width: 6, [-32768, 32767] - // uint32 width: 10, [0, 4294967295] - // int32 width: 11, [-2147483648, 2147483647] - // uint64 width: NOT SET (20, [0, 18446744073709551615]) - // int64 width: NOT SET (20, [-9223372036854775808, 9223372036854775807]) - if constexpr (std::is_same::value) { - ss << std::setw(3) << std::setiosflags(std::ios::right) << static_cast(value); - } else if constexpr (std::is_same::value) { - ss << std::setw(4) << std::setiosflags(std::ios::right) << static_cast(value); - } else if constexpr (std::is_same::value) { - ss << std::setw(5) << std::setiosflags(std::ios::right) << value; - } else if constexpr (std::is_same::value) { - ss << std::setw(6) << std::setiosflags(std::ios::right) << value; - } else if constexpr (std::is_same::value) { - ss << std::setw(10) << std::setiosflags(std::ios::right) << value; - } else if constexpr (std::is_same::value) { - ss << std::setw(11) << std::setiosflags(std::ios::right) << value; - } else { + if (isScalar) { ss << value; + } else if constexpr (std::is_same::value) { + ss << std::setw(num_width_) << std::setiosflags(std::ios::right) << static_cast(value); + } else if constexpr (std::is_same::value) { + ss << std::setw(num_width_) << std::setiosflags(std::ios::right) << static_cast(value); + } else { + ss << std::setw(num_width_) << std::setiosflags(std::ios::right) << value; } } if (!isScalar && i != end - 1) { @@ -366,9 +371,9 @@ class TensorDataImpl : public TensorData { } // Handle the second half. 
if (num > kThreshold / 2) { - auto continue_pos = num - kThreshold / 2; - for (ssize_t i = continue_pos; i < num; i++) { - if (use_comma && i != continue_pos) { + ssize_t iter_times = std::min(static_cast(num - kThreshold / 2), static_cast(kThreshold / 2)); + for (ssize_t i = 0; i < iter_times; i++) { + if (use_comma && i != 0) { ss << ','; } ss << '\n'; @@ -380,6 +385,46 @@ class TensorDataImpl : public TensorData { ss << ']'; } + void GetMaxMinValueRecursive(const ShapeVector &shape, ssize_t *index, ssize_t depth, T *max_value, + T *min_value) const { + if (depth >= static_cast(ndim_)) { + return; + } + if (depth == static_cast(ndim_) - 1) { // Bottom dimension + ssize_t num = shape[depth]; + const bool is_multi_dim = num > kThreshold && ndim_ > 1; + for (ssize_t i = 0; i < num; i++) { + if (is_multi_dim && i >= kThreshold / 2 && i < num - kThreshold / 2) { + continue; + } + const auto value = data_[i]; + *max_value = std::max(*max_value, value); + *min_value = std::min(*min_value, value); + } + *index += num; + } else { // Middle dimension + ssize_t num = shape[depth]; + for (ssize_t i = 0; i < std::min(static_cast(kThreshold / 2), num); i++) { + GetMaxMinValueRecursive(shape, index, depth + 1, max_value, min_value); + } + if (num > kThreshold) { + ssize_t ignored = shape[depth + 1]; + for (ssize_t i = depth + 2; i < static_cast(ndim_); i++) { + ignored *= shape[i]; + } + ignored *= num - kThreshold; + *index += ignored; + } + if (num > kThreshold / 2) { + ssize_t iter_times = std::min(static_cast(num - kThreshold / 2), static_cast(kThreshold / 2)); + for (ssize_t i = 0; i < iter_times; i++) { + GetMaxMinValueRecursive(shape, index, depth + 1, max_value, min_value); + } + } + } + } + + mutable int num_width_{0}; size_t ndim_{0}; size_t data_size_{0}; std::unique_ptr data_; @@ -522,7 +567,7 @@ std::string Tensor::ToStringInternal(int limit_size) const { auto dtype = Dtype(); MS_EXCEPTION_IF_NULL(dtype); data_sync(); - buf << "Tensor(shape=" << 
ShapeToString(shape_) << ", dtype=" << dtype->ToString() << ','; + buf << "Tensor(shape=" << ShapeToString(shape_) << ", dtype=" << dtype->ToString() << ", value="; if (limit_size <= 0 || DataSize() < limit_size) { // Only print data for small tensor. buf << ((data().ndim() > 1) ? '\n' : ' ') << data().ToString(data_type_, shape_, false) << ')'; @@ -544,8 +589,8 @@ std::string Tensor::ToStringRepr() const { auto dtype = Dtype(); MS_EXCEPTION_IF_NULL(dtype); data_sync(); - buf << "Tensor(shape=" << ShapeToString(shape_) << ", dtype=" << dtype->ToString() << ',' - << ((data().ndim() > 1) ? '\n' : ' ') << data().ToString(data_type_, shape_, true) << ')'; + buf << "Tensor(shape=" << ShapeToString(shape_) << ", dtype=" << dtype->ToString() + << ", value=" << ((data().ndim() > 1) ? '\n' : ' ') << data().ToString(data_type_, shape_, true) << ')'; return buf.str(); } @@ -557,7 +602,6 @@ void Tensor::data_sync() const { if (!device_sync_->SyncDeviceToHost(shape(), static_cast(data().nbytes()), data_type(), data_c())) { MS_LOG(EXCEPTION) << "SyncDeviceToHost failed."; } - sync_status_ = kNeedSyncHostToDevice; } TypeId Tensor::set_data_type(const TypeId data_type) { diff --git a/tests/st/ops/ascend/test_tensor_print/tensor_print_utils.py b/tests/st/ops/ascend/test_tensor_print/tensor_print_utils.py new file mode 100644 index 00000000000..9178bd0ecc3 --- /dev/null +++ b/tests/st/ops/ascend/test_tensor_print/tensor_print_utils.py @@ -0,0 +1,68 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +import numpy as np + +import mindspore +import mindspore.context as context +import mindspore.nn as nn +from mindspore import Tensor +from mindspore.ops import operations as P + +context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + +class TensorPrint(nn.Cell): + def __init__(self): + super().__init__() + self.print = P.Print() + + def construct(self, *inputs): + self.print(*inputs) + return inputs[0] + +def get_tensor(is_scalar, input_type): + if is_scalar == 'scalar': + if input_type == mindspore.bool_: + return Tensor(True, dtype=input_type) + if input_type in [mindspore.uint8, mindspore.uint16, mindspore.uint32, mindspore.uint64]: + return Tensor(1, dtype=input_type) + if input_type in [mindspore.int8, mindspore.int16, mindspore.int32, mindspore.int64]: + return Tensor(-1, dtype=input_type) + if input_type in [mindspore.float16, mindspore.float32, mindspore.float64]: + return Tensor(0.01, dtype=input_type) + else: + if input_type == mindspore.bool_: + return Tensor(np.array([[True, False], [False, True]]), dtype=input_type) + if input_type in [mindspore.uint8, mindspore.uint16, mindspore.uint32, mindspore.uint64]: + return Tensor(np.array([[1, 2, 3], [4, 5, 6]]), dtype=input_type) + if input_type in [mindspore.int8, mindspore.int16, mindspore.int32, mindspore.int64]: + return Tensor(np.array([[-1, 2, -3], [-4, 5, -6]]), dtype=input_type) + if input_type in [mindspore.float16, mindspore.float32, mindspore.float64]: + return Tensor(np.array([[1.0, -2.0, 3.0], [4.0, -5.0, 6.0]]), dtype=input_type) + return Tensor(False, np.bool) + +if __name__ == "__main__": + net = TensorPrint() + net(get_tensor('scalar', mindspore.bool_), get_tensor('scalar', mindspore.uint8), + get_tensor('scalar', mindspore.int8), get_tensor('scalar', mindspore.uint16), + get_tensor('scalar', 
mindspore.int16), get_tensor('scalar', mindspore.uint32), + get_tensor('scalar', mindspore.int32), get_tensor('scalar', mindspore.uint64), + get_tensor('scalar', mindspore.int64), get_tensor('scalar', mindspore.float16), + get_tensor('scalar', mindspore.float32), get_tensor('scalar', mindspore.float64), + get_tensor('array', mindspore.bool_), get_tensor('array', mindspore.uint8), + get_tensor('array', mindspore.int8), get_tensor('array', mindspore.uint16), + get_tensor('array', mindspore.int16), get_tensor('array', mindspore.uint32), + get_tensor('array', mindspore.int32), get_tensor('array', mindspore.uint64), + get_tensor('array', mindspore.int64), get_tensor('array', mindspore.float16), + get_tensor('array', mindspore.float32), get_tensor('array', mindspore.float64)) diff --git a/tests/st/ops/ascend/test_tensor_print/test_tensor_print.py b/tests/st/ops/ascend/test_tensor_print/test_tensor_print.py new file mode 100644 index 00000000000..5efd5ed2305 --- /dev/null +++ b/tests/st/ops/ascend/test_tensor_print/test_tensor_print.py @@ -0,0 +1,74 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +import os +import re +import pytest + +# Defines the expected value of tensor printout, corresponding to different data types. 
+expect_scalar = {'Bool': 'True', 'UInt': '1', 'Int': '-1', 'Float16': '*.*******', 'Float32_64': '*.**'} +expect_array = {'Bool': '\n[[ True False]\n [False True]]', 'UInt': '\n[[1 2 3]\n [4 5 6]]', + 'Int': '\n[[-1 2 -3]\n [-4 5 -6]]', + 'Float16': '\n[[ *.****e*** **.****e*** *.****e***]\n [ *.****e*** **.****e*** *.****e***]]', + 'Float32_64': '\n[[ *.********e*** **.********e*** *.********e***]\n ' \ + '[ *.********e*** **.********e*** *.********e***]]'} + +def get_expect_value(res): + if res[0] == '[1]': + if res[1] == 'Bool': + return expect_scalar['Bool'] + if res[1] in ['UInt8', 'UInt16', 'UInt32', 'UInt64']: + return expect_scalar['UInt'] + if res[1] in ['Int8', 'Int16', 'Int32', 'Int64']: + return expect_scalar['Int'] + if res[1] == 'Float16': + return expect_scalar['Float16'] + if res[1] in ['Float32', 'Float64']: + return expect_scalar['Float32_64'] + else: + if res[1] == 'Bool': + return expect_array['Bool'] + if res[1] in ['UInt8', 'UInt16', 'UInt32', 'UInt64']: + return expect_array['UInt'] + if res[1] in ['Int8', 'Int16', 'Int32', 'Int64']: + return expect_array['Int'] + if res[1] == 'Float16': + return expect_array['Float16'] + if res[1] in ['Float32', 'Float64']: + return expect_array['Float32_64'] + return 'None' + +def num_to_asterisk(data): + # Convert number and +/- to asterisk + return re.sub(r'\d|\+|\-', '*', data.group()) + +@pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_onecard +def test_tensor_print(): + path = os.path.split(os.path.realpath(__file__))[0] + cmd = f"python {path}/tensor_print_utils.py" + lines = os.popen(cmd).readlines() + data = ''.join(lines) + result = re.findall(r'Tensor[(]shape=(.*?), dtype=(.*?), value=(.*?)[)]', data, re.DOTALL) + assert (result != []), "Output does not meet the requirements." + for res in result: + assert (len(res) == 3), "Output does not meet the requirements." 
+ expect = get_expect_value(res) + value = res[2] + if value.find('.') != -1: + # Convert decimals to asterisks, such as 0.01 --> *.** and 1.0e+2 --> *.*e** + value = re.sub(r'-?\d+\.\d+|e[\+|\-]\d+', num_to_asterisk, value, flags=re.DOTALL) + assert (repr(value) == repr(expect)), repr("output: " + value + ", expect: " + expect)