diff --git a/cmake/package_lite.cmake b/cmake/package_lite.cmake index a6e513ef315..96ada04f855 100644 --- a/cmake/package_lite.cmake +++ b/cmake/package_lite.cmake @@ -175,6 +175,8 @@ if(PLATFORM_ARM64) endif() install(FILES ${TOP_DIR}/mindspore/core/ir/dtype/type_id.h DESTINATION ${RUNTIME_INC_DIR}/ir/dtype COMPONENT ${RUNTIME_COMPONENT_NAME}) + install(FILES ${TOP_DIR}/mindspore/core/ir/format.h DESTINATION ${RUNTIME_INC_DIR}/ir + COMPONENT ${RUNTIME_COMPONENT_NAME}) install(DIRECTORY ${TOP_DIR}/include/api/ DESTINATION ${RUNTIME_INC_DIR}/api COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "ops*" EXCLUDE) file(GLOB NNACL_FILES GLOB ${NNACL_DIR}/*.h) @@ -232,6 +234,8 @@ elseif(PLATFORM_ARM32) endif() install(FILES ${TOP_DIR}/mindspore/core/ir/dtype/type_id.h DESTINATION ${RUNTIME_INC_DIR}/ir/dtype COMPONENT ${RUNTIME_COMPONENT_NAME}) + install(FILES ${TOP_DIR}/mindspore/core/ir/format.h DESTINATION ${RUNTIME_INC_DIR}/ir + COMPONENT ${RUNTIME_COMPONENT_NAME}) install(DIRECTORY ${TOP_DIR}/include/api/ DESTINATION ${RUNTIME_INC_DIR}/api COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "ops*" EXCLUDE) file(GLOB NNACL_FILES GLOB ${NNACL_DIR}/*.h) @@ -315,6 +319,8 @@ elseif(WIN32) endif() install(FILES ${TOP_DIR}/mindspore/core/ir/dtype/type_id.h DESTINATION ${RUNTIME_INC_DIR}/ir/dtype COMPONENT ${RUNTIME_COMPONENT_NAME}) + install(FILES ${TOP_DIR}/mindspore/core/ir/format.h DESTINATION ${RUNTIME_INC_DIR}/ir + COMPONENT ${RUNTIME_COMPONENT_NAME}) install(DIRECTORY ${TOP_DIR}/include/api/ DESTINATION ${RUNTIME_INC_DIR}/api COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "ops*" EXCLUDE) install(FILES ${TOP_DIR}/build/mindspore/src/${MINDSPORE_LITE_LIB_NAME}.a DESTINATION ${RUNTIME_LIB_DIR} @@ -338,6 +344,8 @@ else() endif() install(FILES ${TOP_DIR}/mindspore/core/ir/dtype/type_id.h DESTINATION ${RUNTIME_INC_DIR}/ir/dtype COMPONENT ${RUNTIME_COMPONENT_NAME}) + install(FILES 
${TOP_DIR}/mindspore/core/ir/format.h DESTINATION ${RUNTIME_INC_DIR}/ir + COMPONENT ${RUNTIME_COMPONENT_NAME}) install(DIRECTORY ${TOP_DIR}/include/api/ DESTINATION ${RUNTIME_INC_DIR}/api COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "ops*" EXCLUDE) install(FILES ${TOP_DIR}/mindspore/lite/build/src/${MINDSPORE_LITE_LIB_NAME}.so DESTINATION ${RUNTIME_LIB_DIR} diff --git a/mindspore/lite/include/ms_tensor.h b/mindspore/lite/include/ms_tensor.h index 9cdaee185f5..b7093f6e629 100644 --- a/mindspore/lite/include/ms_tensor.h +++ b/mindspore/lite/include/ms_tensor.h @@ -21,6 +21,7 @@ #include "ir/dtype/type_id.h" namespace mindspore { +enum Format : int64_t; namespace tensor { /// \brief MSTensor defined tensor in MindSpore Lite. class MS_API MSTensor { @@ -58,8 +59,20 @@ class MS_API MSTensor { virtual TypeId data_type() const = 0; /// \brief Set data type of current MSTensor. + /// + /// \param[in] data_type Define data type, which is shown in type_id.h. virtual void set_data_type(TypeId data_type) = 0; + /// \brief Set format of current MSTensor. + /// + /// \param[in] format Define format of data, which is shown in format.h + virtual void set_format(mindspore::Format format) = 0; + + /// \brief Get format of current MSTensor. + /// + /// \return format, which is shown in format.h + virtual mindspore::Format format() const = 0; + /// \brief Get shape of the MindSpore Lite MSTensor. /// /// \return A vector of int as the shape of the MindSpore Lite MSTensor. diff --git a/mindspore/lite/micro/coder/allocator/allocator.cc b/mindspore/lite/micro/coder/allocator/allocator.cc index a684e56a3b2..de94579cda9 100644 --- a/mindspore/lite/micro/coder/allocator/allocator.cc +++ b/mindspore/lite/micro/coder/allocator/allocator.cc @@ -32,7 +32,7 @@ void *MemoryAllocator::MallocWeightTensor(TypeId type_id, size_t size, MallocTyp size_t type_size = item->second; std::vector shape = {1, static_cast(size / type_size)}; auto cate = type == kOfflinePackWeight ? 
Tensor::Category::CONST_TENSOR : Tensor::Category::VAR; - Tensor *weight = new (std::nothrow) lite::Tensor(type_id, shape, schema::Format_NHWC, cate); + Tensor *weight = new (std::nothrow) lite::Tensor(type_id, shape, mindspore::NHWC, cate); MS_CHECK_PTR_RET_NULL(weight); std::string runtime_addr = kWeightPrefixName + std::to_string(weight_index_++); malloc_weights_addr_.insert(std::make_pair(weight, runtime_addr)); diff --git a/mindspore/lite/micro/coder/generator/component/const_blocks/mtensor.cc b/mindspore/lite/micro/coder/generator/component/const_blocks/mtensor.cc index 8cc48318587..11d444cd18b 100644 --- a/mindspore/lite/micro/coder/generator/component/const_blocks/mtensor.cc +++ b/mindspore/lite/micro/coder/generator/component/const_blocks/mtensor.cc @@ -38,6 +38,7 @@ const char tensor_header[] = R"RAW( #define MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_TENSOR_H_ #include "include/ms_tensor.h" +#include "include/ir/format.h" namespace mindspore { namespace lite { @@ -64,6 +65,8 @@ class MTensor : public mindspore::tensor::MSTensor { AllocatorPtr allocator() const override { return nullptr; } TypeId data_type() const override { return data_type_; } void set_data_type(TypeId data_type) override { data_type_ = data_type; } + void set_format(mindspore::Format format) override {} + mindspore::Format format() const override { return mindspore::NHWC; } Vector shape() const override { return shape_; } void set_shape(const Vector &shape) override { shape_ = shape; } int ElementsNum() const override; diff --git a/mindspore/lite/micro/coder/graph.cc b/mindspore/lite/micro/coder/graph.cc index 63e2d6df742..d67c4ea0ae3 100644 --- a/mindspore/lite/micro/coder/graph.cc +++ b/mindspore/lite/micro/coder/graph.cc @@ -78,7 +78,8 @@ int CoderGraph::ConvertTensors() { } int origin_data_type = static_cast(origin_tensor->dataType()); Tensor *dstTensor = new (std::nothrow) - lite::Tensor(TypeId(origin_data_type), shape, origin_tensor->format(), TensorCategory(origin_tensor)); + 
lite::Tensor(TypeId(origin_data_type), shape, static_cast(origin_tensor->format()), + TensorCategory(origin_tensor)); MS_CHECK_PTR(dstTensor); if (origin_tensor->nodeType() == NodeType_ValueNode && origin_tensor->data() != nullptr && origin_tensor->data()->size() > 0) { diff --git a/mindspore/lite/micro/coder/opcoders/base/conv2d_base_coder.cc b/mindspore/lite/micro/coder/opcoders/base/conv2d_base_coder.cc index 46be375effd..eb2218c3f8a 100644 --- a/mindspore/lite/micro/coder/opcoders/base/conv2d_base_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/base/conv2d_base_coder.cc @@ -95,37 +95,38 @@ int Conv2DBaseCoder::MallocConvQuantParams(size_t input_arg_num, size_t filter_a return RET_OK; } -std::string Conv2DBaseCoder::LayoutTransformFp32(schema::Format src_format, schema::Format dst_format) { +std::string Conv2DBaseCoder::LayoutTransformFp32(mindspore::Format src_format, mindspore::Format dst_format) { std::string ret; - if (src_format == schema::Format_NHWC && dst_format == schema::Format_NC4HW4) { + if (src_format == mindspore::NHWC && dst_format == mindspore::NC4HW4) { ret = "PackNHWCToNC4HW4Fp32"; - } else if (src_format == schema::Format_NHWC && dst_format == schema::Format_NHWC4) { + } else if (src_format == mindspore::NHWC && dst_format == mindspore::NHWC4) { ret = "PackNHWCToNHWC4Fp32"; - } else if (src_format == schema::Format_NC4HW4 && dst_format == schema::Format_NHWC4) { + } else if (src_format == mindspore::NC4HW4 && dst_format == mindspore::NHWC4) { ret = "PackNC4HW4ToNHWC4Fp32"; - } else if (src_format == schema::Format_NCHW && dst_format == schema::Format_NC4HW4) { + } else if (src_format == mindspore::NCHW && dst_format == mindspore::NC4HW4) { ret = "PackNCHWToNC4HW4Fp32"; - } else if (src_format == schema::Format_NC4HW4 && dst_format == schema::Format_NHWC) { + } else if (src_format == mindspore::NC4HW4 && dst_format == mindspore::NHWC) { ret = "PackNC4HW4ToNHWCFp32"; } else { - MS_LOG(ERROR) << "Unsupported transform from " << 
schema::EnumNameFormat(src_format) << " to " - << schema::EnumNameFormat(dst_format); + MS_LOG(ERROR) << "Unsupported transform from " << schema::EnumNameFormat(static_cast(src_format)) + << " to " << schema::EnumNameFormat(static_cast(dst_format)); } return ret; } -std::string Conv2DBaseCoder::LayoutTransformInt8(schema::Format src_format, schema::Format dst_format) { +std::string Conv2DBaseCoder::LayoutTransformInt8(mindspore::Format src_format, mindspore::Format dst_format) { std::string ret; - if (src_format == schema::Format_NHWC && dst_format == schema::Format_NHWC4) { + if (src_format == mindspore::NHWC && dst_format == mindspore::NHWC4) { ret = "PackNHWCToNHWC4Int8"; } else { - MS_LOG(ERROR) << "Unsupported transform from " << schema::EnumNameFormat(src_format) << " to " - << schema::EnumNameFormat(dst_format); + MS_LOG(ERROR) << "Unsupported transform from " << schema::EnumNameFormat(static_cast(src_format)) + << " to " << schema::EnumNameFormat(static_cast(dst_format)); } return ret; } -std::string Conv2DBaseCoder::LayoutTransform(TypeId data_type, schema::Format src_format, schema::Format dst_format) { +std::string Conv2DBaseCoder::LayoutTransform(TypeId data_type, mindspore::Format src_format, + mindspore::Format dst_format) { std::string ret; switch (data_type) { case kNumberTypeInt8: @@ -352,8 +353,8 @@ int Conv2DBaseCoder::Init() { int Conv2DBaseCoder::CheckLayout(lite::Tensor *input_tensor) { mindspore::TypeId data_type = input_tensor->data_type(); - schema::Format input_format = input_tensor->format(); - schema::Format execute_format = schema::Format_NHWC4; + mindspore::Format input_format = input_tensor->format(); + mindspore::Format execute_format = mindspore::NHWC4; convert_func_ = LayoutTransform(data_type, input_format, execute_format); MS_CHECK_TRUE(!convert_func_.empty(), "layout convert func is nullptr."); return RET_OK; diff --git a/mindspore/lite/micro/coder/opcoders/base/conv2d_base_coder.h 
b/mindspore/lite/micro/coder/opcoders/base/conv2d_base_coder.h index 5b5044b469c..e226c1b9e22 100644 --- a/mindspore/lite/micro/coder/opcoders/base/conv2d_base_coder.h +++ b/mindspore/lite/micro/coder/opcoders/base/conv2d_base_coder.h @@ -57,11 +57,11 @@ class Conv2DBaseCoder : public OperatorCoder { int CheckLayout(lite::Tensor *input_tensor); - std::string LayoutTransformFp32(schema::Format src_format, schema::Format dst_format); + std::string LayoutTransformFp32(mindspore::Format src_format, mindspore::Format dst_format); - std::string LayoutTransformInt8(schema::Format src_format, schema::Format dst_format); + std::string LayoutTransformInt8(mindspore::Format src_format, mindspore::Format dst_format); - std::string LayoutTransform(TypeId data_type, schema::Format src_format, schema::Format dst_format); + std::string LayoutTransform(TypeId data_type, mindspore::Format src_format, mindspore::Format dst_format); private: int MallocConvQuantParams(size_t input_arg_num, size_t filter_arg_num, size_t output_arg_num); diff --git a/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/fullconnection_int8_coder.cc b/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/fullconnection_int8_coder.cc index 07ab249851e..64ba99899ce 100644 --- a/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/fullconnection_int8_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/fullconnection_int8_coder.cc @@ -29,7 +29,7 @@ int FullConnectionInt8Coder::Prepare(CoderContext *const context) { return RET_OK; } -void FullConnectionInt8Coder::ConfigInputOutput() { output_tensor_->set_format(schema::Format_NHWC); } +void FullConnectionInt8Coder::ConfigInputOutput() { output_tensor_->set_format(mindspore::NHWC); } int FullConnectionInt8Coder::DoCode(CoderContext *const context) { Serializer code; diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.cc index 3fee43883f6..33f5520b931 100644 
--- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.cc @@ -109,7 +109,7 @@ int Conv2D3x3Int8Coder::InitTmpBuffer(CoderContext *const context) { return RET_OK; } -void Conv2D3x3Int8Coder::ConfigInputOutput() { output_tensor_->set_format(schema::Format_NHWC); } +void Conv2D3x3Int8Coder::ConfigInputOutput() { output_tensor_->set_format(mindspore::NHWC); } int Conv2D3x3Int8Coder::Prepare(CoderContext *const context) { MS_CHECK_RET_CODE(Conv2DBaseCoder::Init(), "ConvolutionBase init failed."); diff --git a/mindspore/lite/micro/coder/utils/type_cast.cc b/mindspore/lite/micro/coder/utils/type_cast.cc index a6cc8acbbd2..69b3378e1ce 100644 --- a/mindspore/lite/micro/coder/utils/type_cast.cc +++ b/mindspore/lite/micro/coder/utils/type_cast.cc @@ -75,24 +75,24 @@ std::string GetTensorDataType(TypeId type) { } } -std::string EnumMicroTensorFormat(schema::Format format) { +std::string EnumMicroTensorFormat(mindspore::Format format) { switch (format) { - case schema::Format_NHWC: + case mindspore::NHWC: return "Format_NHWC"; - case schema::Format_NCHW: + case mindspore::NCHW: return "Format_NCHW"; - case schema::Format_HWKC: + case mindspore::HWKC: return "Format_HWKC"; - case schema::Format_HWCK: + case mindspore::HWCK: return "Format_HWCK"; - case schema::Format_KCHW: + case mindspore::KCHW: return "Format_KCHW"; - case schema::Format_CKHW: + case mindspore::CKHW: return "Format_CKHW"; - case schema::Format_NC4HW4: + case mindspore::NC4HW4: return "Format_NC4HW4"; default: - MS_LOG(ERROR) << "unsupported format: " << schema::EnumNameFormat(format); + MS_LOG(ERROR) << "unsupported format: " << schema::EnumNameFormat(static_cast(format)); return "Format_NUM_OF_FORMAT"; } } diff --git a/mindspore/lite/micro/coder/utils/type_cast.h b/mindspore/lite/micro/coder/utils/type_cast.h index 41f87383a42..3dddef2ad6a 100644 --- a/mindspore/lite/micro/coder/utils/type_cast.h +++ 
b/mindspore/lite/micro/coder/utils/type_cast.h @@ -33,7 +33,7 @@ std::string EnumNameDataType(TypeId type); std::string GetTensorDataType(TypeId type); -std::string EnumMicroTensorFormat(schema::Format format); +std::string EnumMicroTensorFormat(mindspore::Format format); std::string EnumMicroTensorDataType(TypeId type); diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist_stm32f746/include/ir/format.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist_stm32f746/include/ir/format.h new file mode 100644 index 00000000000..be3fe63fd9d --- /dev/null +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist_stm32f746/include/ir/format.h @@ -0,0 +1,46 @@ +/** + * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/). + * + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CORE_IR_FORMAT_H_ +#define MINDSPORE_CORE_IR_FORMAT_H_ + +#include + +namespace mindspore { +enum Format : int64_t { + NCHW = 0, + NHWC = 1, + NHWC4 = 2, + HWKC = 3, + HWCK = 4, + KCHW = 5, + CKHW = 6, + KHWC = 7, + CHWK = 8, + HW = 9, + HW4 = 10, + NC = 11, + NC4 = 12, + NC4HW4 = 13, + NUM_OF_FORMAT = 14, + NCDHW = 15, + NWC = 16, + NCW = 17 +}; +} // namespace mindspore +#endif // MINDSPORE_CORE_IR_FORMAT_H_ diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist_stm32f746/include/ms_tensor.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist_stm32f746/include/ms_tensor.h old mode 100755 new mode 100644 index e64bc3fd725..4c5d2763c1f --- a/mindspore/lite/micro/example/mnist_stm32f746/mnist_stm32f746/include/ms_tensor.h +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist_stm32f746/include/ms_tensor.h @@ -21,6 +21,7 @@ #include "ir/dtype/type_id.h" namespace mindspore { +enum Format : int64_t ; namespace tensor { /// \brief MSTensor defined tensor in MindSpore Lite. class MS_API MSTensor { @@ -53,8 +54,20 @@ class MS_API MSTensor { virtual TypeId data_type() const = 0; /// \brief Set data type of current MSTensor. + /// + /// \param[in] data_type Define data type, which is shown in type_id.h. virtual void set_data_type(TypeId data_type) = 0; + /// \brief Set format of current MSTensor. + /// + /// \param[in] format Define format of data, which is shown in format.h + virtual void set_format(mindspore::Format format) = 0; + + /// \brief Get format of current MSTensor. + /// + /// \return format, which is shown in format.h + virtual mindspore::Format format() const = 0; + /// \brief Get shape of the MindSpore Lite MSTensor. /// /// \return A vector of int as the shape of the MindSpore Lite MSTensor. 
diff --git a/mindspore/lite/micro/example/mnist_stm32f746/mnist_stm32f746/src/tensor.h b/mindspore/lite/micro/example/mnist_stm32f746/mnist_stm32f746/src/tensor.h index 9aa3f4bd424..16e86173780 100644 --- a/mindspore/lite/micro/example/mnist_stm32f746/mnist_stm32f746/src/tensor.h +++ b/mindspore/lite/micro/example/mnist_stm32f746/mnist_stm32f746/src/tensor.h @@ -19,6 +19,7 @@ #define MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_TENSOR_H_ #include "include/ms_tensor.h" +#include "ir/format.h" namespace mindspore { namespace lite { @@ -45,6 +46,8 @@ class MTensor : public mindspore::tensor::MSTensor { mindspore::Allocator *allocator() const override { return nullptr; } TypeId data_type() const override { return data_type_; } void set_data_type(TypeId data_type) override { data_type_ = data_type; } + void set_format(mindspore::Format format) override {} + mindspore::Format format() const override { return mindspore::NHWC; } Vector shape() const override { return shape_; } void set_shape(const Vector &shape) override { shape_ = shape; } int ElementsNum() const override; diff --git a/mindspore/lite/micro/example/mnist_x86/src/tensor.h b/mindspore/lite/micro/example/mnist_x86/src/tensor.h index caaa99c301d..62d77e8006e 100644 --- a/mindspore/lite/micro/example/mnist_x86/src/tensor.h +++ b/mindspore/lite/micro/example/mnist_x86/src/tensor.h @@ -19,6 +19,7 @@ #define MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_TENSOR_H_ #include "include/ms_tensor.h" +#include "ir/format.h" namespace mindspore { namespace lite { @@ -45,6 +46,8 @@ class MTensor : public mindspore::tensor::MSTensor { AllocatorPtr allocator() const override { return nullptr; } TypeId data_type() const override { return data_type_; } void set_data_type(TypeId data_type) override { data_type_ = data_type; } + void set_format(mindspore::Format format) override {} + mindspore::Format format() const override { return mindspore::NHWC; } Vector shape() const override { return shape_; } void set_shape(const Vector &shape) override { 
shape_ = shape; } int ElementsNum() const override; diff --git a/mindspore/lite/src/common/tensor_util.cc b/mindspore/lite/src/common/tensor_util.cc index 1deb68728dd..93e2bc93beb 100644 --- a/mindspore/lite/src/common/tensor_util.cc +++ b/mindspore/lite/src/common/tensor_util.cc @@ -54,7 +54,7 @@ int OutputTensor2TensorC(const std::vector &tensors, std::vector return RET_ERROR; } tensor_c->data_type_ = kNumberTypeFloat32; - tensor_c->format_ = schema::Format::Format_NCHW; + tensor_c->format_ = mindspore::NCHW; tensor_c->data_ = nullptr; tensor_c->shape_size_ = 0; tensors_c->push_back(tensor_c); @@ -99,7 +99,7 @@ void Tensor2TensorC(Tensor *src, TensorC *dst) { } void TensorC2Tensor(TensorC *src, Tensor *dst) { - dst->set_format(static_cast(src->format_)); + dst->set_format(static_cast(src->format_)); dst->set_data_type(static_cast(src->data_type_)); // get data during the runtime period dst->set_shape(std::vector(src->shape_, src->shape_ + src->shape_size_)); } @@ -131,7 +131,7 @@ int TensorList2TensorListC(TensorList *src, TensorListC *dst) { int TensorListC2TensorList(TensorListC *src, TensorList *dst) { dst->set_data_type(static_cast(src->data_type_)); - dst->set_format(static_cast(src->format_)); + dst->set_format(static_cast(src->format_)); dst->set_shape(std::vector(1, src->element_num_)); dst->set_tensors_data_type(static_cast(src->tensors_data_type_)); diff --git a/mindspore/lite/src/lite_session.cc b/mindspore/lite/src/lite_session.cc index 5f3e7636e72..c45e4ceba7b 100644 --- a/mindspore/lite/src/lite_session.cc +++ b/mindspore/lite/src/lite_session.cc @@ -25,6 +25,7 @@ #include "src/common/utils.h" #include "src/common/prim_util.h" #include "src/common/graph_util.h" +#include "src/common/tensor_util.h" #include "src/kernel_registry.h" #include "src/lite_model.h" #include "src/weight_decoder.h" @@ -163,7 +164,8 @@ lite::Tensor *LiteSession::ConvertTensor(const schema::Tensor &src_tensor) { tensor_list->set_tensors_data_type(tensor_data_type); } } else { - 
dst_tensor = new (std::nothrow) Tensor(TypeId(src_tensor.dataType()), shape, src_tensor.format(), src_category); + dst_tensor = new (std::nothrow) + Tensor(TypeId(src_tensor.dataType()), shape, static_cast(src_tensor.format()), src_category); } return dst_tensor; } @@ -596,7 +598,11 @@ int LiteSession::RunGraph(const KernelCallBack &before, const KernelCallBack &af MS_LOG(ERROR) << "Not support multi-threading"; return RET_ERROR; } - STATUS ret; + STATUS ret = CheckGraphInputFormat(inputs_); + if (ret != RET_OK) { + MS_LOG(ERROR) << "model input's format mey be changed, which should be NHWC."; + return ret; + } MS_ASSERT(this->context_); if (before == nullptr && after == nullptr) { ret = executor_->Run(this->inputs_, this->outputs_, this->kernels_, this->context_->allocator.get()); diff --git a/mindspore/lite/src/ops/compat/attr_transfer_common.cc b/mindspore/lite/src/ops/compat/attr_transfer_common.cc index d21bd7aac17..71f776aec06 100644 --- a/mindspore/lite/src/ops/compat/attr_transfer_common.cc +++ b/mindspore/lite/src/ops/compat/attr_transfer_common.cc @@ -27,8 +27,8 @@ schema::Tensor *AttrToTensor(void *data, int data_size, bool is_array, TypeId ty return nullptr; } auto dst_tensor = - (is_array ? new (std::nothrow) Tensor(type_id, {data_size}, schema::Format_NHWC, Tensor::Category::CONST_TENSOR) - : new (std::nothrow) Tensor(type_id, {}, schema::Format_NHWC, Tensor::Category::CONST_SCALAR)); + (is_array ? 
new (std::nothrow) Tensor(type_id, {data_size}, mindspore::NHWC, Tensor::Category::CONST_TENSOR) + : new (std::nothrow) Tensor(type_id, {}, mindspore::NHWC, Tensor::Category::CONST_SCALAR)); auto dst_data = dst_tensor->MutableData(); if (dst_data == nullptr) { MS_LOG(ERROR) << "Data from tensor is nullptr"; diff --git a/mindspore/lite/src/runtime/agent/npu/npu_converter_utils.cc b/mindspore/lite/src/runtime/agent/npu/npu_converter_utils.cc index 704adc3fab3..032cc3422d3 100644 --- a/mindspore/lite/src/runtime/agent/npu/npu_converter_utils.cc +++ b/mindspore/lite/src/runtime/agent/npu/npu_converter_utils.cc @@ -25,14 +25,14 @@ ge::Shape ConverterToNPUShape(const std::vector &src_shape) { return ge::Shape({shapes}); } -ge::Format ConverterToNPUFormat(schema::Format format) { +ge::Format ConverterToNPUFormat(mindspore::Format format) { ge::Format ge_format; switch (format) { - case schema::Format_NCHW: + case mindspore::NCHW: ge_format = ge::FORMAT_NCHW; break; - case schema::Format_NHWC: - case schema::Format_KHWC: + case mindspore::NHWC: + case mindspore::KHWC: ge_format = ge::FORMAT_NHWC; break; default: diff --git a/mindspore/lite/src/runtime/agent/npu/npu_converter_utils.h b/mindspore/lite/src/runtime/agent/npu/npu_converter_utils.h index 32b63f09288..ca62686bd5f 100644 --- a/mindspore/lite/src/runtime/agent/npu/npu_converter_utils.h +++ b/mindspore/lite/src/runtime/agent/npu/npu_converter_utils.h @@ -30,7 +30,7 @@ std::shared_ptr ConverterToNPUTensor(Tensor *src); hiai::op::Data *ConverterToNPUData(Tensor *src, const std::string &name); -ge::Format ConverterToNPUFormat(schema::Format format); +ge::Format ConverterToNPUFormat(mindspore::Format format); ge::DataType ConverterToNPUDataType(TypeId type_id); diff --git a/mindspore/lite/src/runtime/agent/npu/optimizer/npu_fusion_pass.cc b/mindspore/lite/src/runtime/agent/npu/optimizer/npu_fusion_pass.cc index 82948f61892..276b1e32a92 100644 --- a/mindspore/lite/src/runtime/agent/npu/optimizer/npu_fusion_pass.cc +++ 
b/mindspore/lite/src/runtime/agent/npu/optimizer/npu_fusion_pass.cc @@ -170,7 +170,7 @@ void UpdatePostTensors(kernel::LiteKernel *cur_kernel) { return; } - tensor->set_format(schema::Format_NCHW); + tensor->set_format(mindspore::NCHW); auto nhwc_shape = tensor->shape(); tensor->set_shape({nhwc_shape[0], nhwc_shape[3], nhwc_shape[1], nhwc_shape[2]}); for (auto out_kernel : cur_kernel->out_kernels()) { diff --git a/mindspore/lite/src/runtime/agent/npu/optimizer/npu_insert_transform_pass.cc b/mindspore/lite/src/runtime/agent/npu/optimizer/npu_insert_transform_pass.cc index 6799798870d..85471691c71 100644 --- a/mindspore/lite/src/runtime/agent/npu/optimizer/npu_insert_transform_pass.cc +++ b/mindspore/lite/src/runtime/agent/npu/optimizer/npu_insert_transform_pass.cc @@ -124,7 +124,7 @@ int NPUInsertTransformPass::InsertNode(kernel::LiteKernel *kernel, kernel::LiteK std::vector nchw_shape = {nhwc_shape[0], nhwc_shape[3], nhwc_shape[1], nhwc_shape[2]}; auto nh2nc_name = kernel_name + "_nh2nc_" + std::to_string(total++); - auto nh2nc_tensor = new (std::nothrow) Tensor(in_tensor->data_type(), nchw_shape, schema::Format_NCHW, Tensor::VAR); + auto nh2nc_tensor = new (std::nothrow) Tensor(in_tensor->data_type(), nchw_shape, mindspore::NCHW, Tensor::VAR); if (nh2nc_tensor == nullptr) { MS_LOG(ERROR) << "New nchw tensor failed when inserting nchw2nhwc kernel."; return RET_ERROR; @@ -135,7 +135,7 @@ int NPUInsertTransformPass::InsertNode(kernel::LiteKernel *kernel, kernel::LiteK all_tensors_->push_back(nh2nc_tensors[0]); auto nc2nh_name = kernel_name + "_nc2nh_" + std::to_string(total++); - auto nc2nh_tensor = new (std::nothrow) Tensor(in_tensor->data_type(), nhwc_shape, schema::Format_NHWC, Tensor::VAR); + auto nc2nh_tensor = new (std::nothrow) Tensor(in_tensor->data_type(), nhwc_shape, mindspore::NHWC, Tensor::VAR); if (nc2nh_tensor == nullptr) { MS_LOG(ERROR) << "New nhwc tensor failed when inserting nhwc2nchw kernel."; return RET_ERROR; @@ -151,7 +151,7 @@ int 
NPUInsertTransformPass::InsertNode(kernel::LiteKernel *kernel, kernel::LiteK auto *nc2nh_kernel = NPUPassUtils::CreateNchw2NhwcKernel(nh2nc_tensors, nc2nh_tensors, context_, nc2nh_name); trans_kernels->push_back(nc2nh_kernel); - auto nh2nc_perm_tensor = new Tensor(kNumberTypeInt32, {4}, schema::Format_NHWC, Tensor::CONST_TENSOR); + auto nh2nc_perm_tensor = new Tensor(kNumberTypeInt32, {4}, mindspore::NHWC, Tensor::CONST_TENSOR); auto nh2nc_data = nh2nc_perm_tensor->MutableData(); if (nh2nc_data == nullptr) { return RET_ERROR; @@ -160,7 +160,7 @@ int NPUInsertTransformPass::InsertNode(kernel::LiteKernel *kernel, kernel::LiteK memcpy(nh2nc_data, nh2nc_perm_vector.data(), 4 * sizeof(int)); all_tensors_->push_back(nh2nc_perm_tensor); - auto nc2nh_perm_tensor = new Tensor(kNumberTypeInt32, {4}, schema::Format_NHWC, Tensor::CONST_TENSOR); + auto nc2nh_perm_tensor = new Tensor(kNumberTypeInt32, {4}, mindspore::NHWC, Tensor::CONST_TENSOR); auto nc2nh_data = nc2nh_perm_tensor->MutableData(); if (nc2nh_data == nullptr) { return RET_ERROR; diff --git a/mindspore/lite/src/runtime/agent/npu/optimizer/npu_transform_pass.cc b/mindspore/lite/src/runtime/agent/npu/optimizer/npu_transform_pass.cc index 44c33d7c97f..5fa715f93dd 100644 --- a/mindspore/lite/src/runtime/agent/npu/optimizer/npu_transform_pass.cc +++ b/mindspore/lite/src/runtime/agent/npu/optimizer/npu_transform_pass.cc @@ -46,7 +46,7 @@ int NPUTransformPass::InsertPreNodes(kernel::LiteKernel *kernel, std::vectorin_tensors()[0]->shape(); std::vector nchw_shape = {nhwc_shape[0], nhwc_shape[3], nhwc_shape[1], nhwc_shape[2]}; auto tensor = - new (std::nothrow) Tensor(kernel->in_tensors()[0]->data_type(), nchw_shape, schema::Format_NCHW, Tensor::VAR); + new (std::nothrow) Tensor(kernel->in_tensors()[0]->data_type(), nchw_shape, mindspore::NCHW, Tensor::VAR); if (tensor == nullptr) { MS_LOG(ERROR) << "New nchw tensor failed when inserting pre nhwc2nchw kernel."; return RET_ERROR; @@ -57,7 +57,7 @@ int 
NPUTransformPass::InsertPreNodes(kernel::LiteKernel *kernel, std::vector pre_trans_out_tensors = {tensor}; all_tensors_->push_back(pre_trans_out_tensors[0]); - auto nh2nc_perm_tensor = new Tensor(kNumberTypeInt32, {4}, schema::Format_NHWC, Tensor::CONST_TENSOR); + auto nh2nc_perm_tensor = new Tensor(kNumberTypeInt32, {4}, mindspore::NHWC, Tensor::CONST_TENSOR); auto nh2nc_data = nh2nc_perm_tensor->MutableData(); if (nh2nc_data == nullptr) { return RET_ERROR; @@ -107,7 +107,7 @@ int NPUTransformPass::InsertPostNodes(kernel::LiteKernel *kernel, std::vectorout_tensors()[0]->shape(); std::vector nchw_shape = {nhwc_shape[0], nhwc_shape[3], nhwc_shape[1], nhwc_shape[2]}; auto nc2nh_tensor = - new (std::nothrow) Tensor(kernel->out_tensors()[0]->data_type(), nchw_shape, schema::Format_NCHW, Tensor::VAR); + new (std::nothrow) Tensor(kernel->out_tensors()[0]->data_type(), nchw_shape, mindspore::NCHW, Tensor::VAR); if (nc2nh_tensor == nullptr) { MS_LOG(ERROR) << "New nchw tensor failed when inserting post nchw2nhwc kernel."; return RET_ERROR; @@ -119,7 +119,7 @@ int NPUTransformPass::InsertPostNodes(kernel::LiteKernel *kernel, std::vectorMutableData(); if (nc2nh_data == nullptr) { return RET_ERROR; @@ -141,7 +141,7 @@ int NPUTransformPass::InsertPostNodes(kernel::LiteKernel *kernel, std::vectorMutableData(); if (nc2nh_data == nullptr) { return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/depth_to_space_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/depth_to_space_base.cc index 1906cf44b6c..95e96ea7380 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/depth_to_space_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/depth_to_space_base.cc @@ -22,7 +22,7 @@ using mindspore::lite::RET_PARAM_INVALID; namespace mindspore::kernel { int DepthToSpaceBaseCPUKernel::ReSize() { - if (in_tensors_.at(0)->format() != schema::Format::Format_NHWC) { + if (in_tensors_.at(0)->format() != mindspore::NHWC) { MS_LOG(ERROR) << "depth_to_space only support 
NHWC now!"; return RET_FORMAT_ERR; } diff --git a/mindspore/lite/src/runtime/kernel/arm/base/group_convolution_creator.cc b/mindspore/lite/src/runtime/kernel/arm/base/group_convolution_creator.cc index 400bfb6c978..b1b299e9a0e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/group_convolution_creator.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/group_convolution_creator.cc @@ -62,8 +62,8 @@ static inline lite::Tensor *TensorMalloc(lite::Tensor *tensor) { } lite::Tensor *CreateConstTensor(lite::Tensor *tensor, const std::vector &shape, const int index) { - auto new_tensor = new (std::nothrow) - lite::Tensor(tensor->data_type(), shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + auto new_tensor = + new (std::nothrow) lite::Tensor(tensor->data_type(), shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); if (new_tensor == nullptr) { MS_LOG(ERROR) << "Create new_tensor failed."; return nullptr; @@ -125,7 +125,7 @@ void GroupConvCreator::FreeGroupConvs() { int GroupConvCreator::NewInputTensor(std::vector *tensors) { auto in_tensor = - CreateVarTensor({input_shape_, schema::Format_NHWC, data_type_, lite::Tensor::Category::VAR, true}, infered_); + CreateVarTensor({input_shape_, mindspore::NHWC, data_type_, lite::Tensor::Category::VAR, true}, infered_); if (in_tensor == nullptr) { return lite::RET_ERROR; } diff --git a/mindspore/lite/src/runtime/kernel/arm/base/group_convolution_creator.h b/mindspore/lite/src/runtime/kernel/arm/base/group_convolution_creator.h index b0992da6354..52321b418a6 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/group_convolution_creator.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/group_convolution_creator.h @@ -25,7 +25,7 @@ namespace mindspore::kernel { struct TensorInfo { std::vector shape_; - schema::Format format_; + mindspore::Format format_; TypeId data_type_; lite::Tensor::Category tensor_type_; bool is_in_; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/layout_transform.cc 
b/mindspore/lite/src/runtime/kernel/arm/base/layout_transform.cc index 05e799916a4..a00dbbb6eab 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/layout_transform.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/layout_transform.cc @@ -17,35 +17,34 @@ #include "src/runtime/kernel/arm/base/layout_transform.h" #include "src/common/log_adapter.h" -using mindspore::schema::Format; namespace mindspore::kernel { -LayoutConvertor LayoutTransformFp32(schema::Format src_format, schema::Format dst_format) { - if (src_format == schema::Format::Format_NHWC && dst_format == schema::Format::Format_NC4HW4) { +LayoutConvertor LayoutTransformFp32(mindspore::Format src_format, mindspore::Format dst_format) { + if (src_format == mindspore::NHWC && dst_format == mindspore::NC4HW4) { return PackNHWCToNC4HW4Fp32; - } else if (src_format == schema::Format::Format_NHWC && dst_format == schema::Format::Format_NHWC4) { + } else if (src_format == mindspore::NHWC && dst_format == mindspore::NHWC4) { return PackNHWCToNHWC4Fp32; - } else if (src_format == schema::Format::Format_NC4HW4 && dst_format == schema::Format::Format_NHWC4) { + } else if (src_format == mindspore::NC4HW4 && dst_format == mindspore::NHWC4) { return PackNC4HW4ToNHWC4Fp32; - } else if (src_format == schema::Format::Format_NCHW && dst_format == schema::Format::Format_NC4HW4) { + } else if (src_format == mindspore::NCHW && dst_format == mindspore::NC4HW4) { return PackNCHWToNC4HW4Fp32; - } else if (src_format == schema::Format::Format_NC4HW4 && dst_format == schema::Format::Format_NHWC) { + } else if (src_format == mindspore::NC4HW4 && dst_format == mindspore::NHWC) { return PackNC4HW4ToNHWCFp32; } else { - MS_LOG(ERROR) << "Unsupported transform from " << EnumNameFormat(src_format) << " to " - << EnumNameFormat(dst_format); + MS_LOG(ERROR) << "Unsupported transform from " << EnumNameFormat(static_cast(src_format)) << " to " + << EnumNameFormat(static_cast(dst_format)); return nullptr; } } -LayoutConvertor 
LayoutTransformInt8(schema::Format src_format, schema::Format dst_format) { - if (src_format == schema::Format::Format_NHWC && dst_format == schema::Format::Format_NHWC4) { +LayoutConvertor LayoutTransformInt8(mindspore::Format src_format, mindspore::Format dst_format) { + if (src_format == mindspore::NHWC && dst_format == mindspore::NHWC4) { return PackNHWCToNHWC4Int8; } else { return nullptr; } } -LayoutConvertor LayoutTransform(TypeId data_type, schema::Format src_format, schema::Format dst_format) { +LayoutConvertor LayoutTransform(TypeId data_type, mindspore::Format src_format, mindspore::Format dst_format) { switch (data_type) { case kNumberTypeInt8: return LayoutTransformInt8(src_format, dst_format); diff --git a/mindspore/lite/src/runtime/kernel/arm/base/layout_transform.h b/mindspore/lite/src/runtime/kernel/arm/base/layout_transform.h index 500e5453d2c..d4229284dec 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/layout_transform.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/layout_transform.h @@ -21,20 +21,19 @@ #include #endif #include "nnacl/pack.h" -#include "schema/ops_generated.h" #include "src/tensor.h" namespace mindspore::kernel { typedef void (*LayoutConvertor)(const void *src, void *dst, int batch, int plane, int channel); #ifdef ENABLE_FP16 -LayoutConvertor LayoutTransformFp16(schema::Format src_format, schema::Format dst_format); +LayoutConvertor LayoutTransformFp16(mindspore::Format src_format, mindspore::Format dst_format); #endif -LayoutConvertor LayoutTransformFp32(schema::Format src_format, schema::Format dst_format); +LayoutConvertor LayoutTransformFp32(mindspore::Format src_format, mindspore::Format dst_format); -LayoutConvertor LayoutTransformInt8(schema::Format src_format, schema::Format dst_format); +LayoutConvertor LayoutTransformInt8(mindspore::Format src_format, mindspore::Format dst_format); -LayoutConvertor LayoutTransform(TypeId data_type, schema::Format src_format, schema::Format dst_format); +LayoutConvertor 
LayoutTransform(TypeId data_type, mindspore::Format src_format, mindspore::Format dst_format); } // namespace mindspore::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_LAYOUT_TRANSFORM_H_ diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_delegate_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_delegate_fp16.cc index 1172782bc5b..f4f973d67df 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_delegate_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_delegate_fp16.cc @@ -32,7 +32,6 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Conv2DFusion; -using mindspore::schema::Format::Format_NHWC; namespace mindspore::kernel { void ConvolutionDelegateFP16CPUKernel::FreeCopiedData() { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/layout_transform_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/layout_transform_fp16.cc index 29ce79bda6c..473037c032e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/layout_transform_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/layout_transform_fp16.cc @@ -15,24 +15,23 @@ */ #include "src/runtime/kernel/arm/fp16/layout_transform_fp16.h" #include "nnacl/fp16/pack_fp16.h" -#include "schema/ops_generated.h" #include "src/common/log_adapter.h" namespace mindspore::kernel { -LayoutConvertor LayoutTransformFp16(schema::Format src_format, schema::Format dst_format) { - if (src_format == schema::Format::Format_NHWC && dst_format == schema::Format::Format_NC4HW4) { +LayoutConvertor LayoutTransformFp16(mindspore::Format src_format, mindspore::Format dst_format) { + if (src_format == mindspore::NHWC && dst_format == mindspore::NC4HW4) { return PackNHWCToNC4HW4Fp16; - } else if (src_format == schema::Format::Format_NHWC && dst_format == schema::Format::Format_NHWC4) { + } else if (src_format == mindspore::NHWC && dst_format == mindspore::NHWC4) { 
return PackNHWCToNHWC4Fp16; - } else if (src_format == schema::Format::Format_NC4HW4 && dst_format == schema::Format::Format_NHWC4) { + } else if (src_format == mindspore::NC4HW4 && dst_format == mindspore::NHWC4) { return PackNC4HW4ToNHWC4Fp16; - } else if (src_format == schema::Format::Format_NCHW && dst_format == schema::Format::Format_NC4HW4) { + } else if (src_format == mindspore::NCHW && dst_format == mindspore::NC4HW4) { return PackNCHWToNC4HW4Fp16; - } else if (src_format == schema::Format::Format_NC4HW4 && dst_format == schema::Format::Format_NHWC) { + } else if (src_format == mindspore::NC4HW4 && dst_format == mindspore::NHWC) { return PackNC4HW4ToNHWCFp16; } else { - MS_LOG(ERROR) << "Unsupported transform from " << EnumNameFormat(src_format) << " to " - << EnumNameFormat(dst_format); + MS_LOG(ERROR) << "Unsupported transform from " << EnumNameFormat(static_cast(src_format)) << " to " + << EnumNameFormat(static_cast(dst_format)); return nullptr; } } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/layout_transform_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/layout_transform_fp16.h index 37e11da649a..d1532549e78 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/layout_transform_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/layout_transform_fp16.h @@ -18,10 +18,9 @@ #define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP16_LAYOUT_TRANSFORM_FP16_H_ #include "src/runtime/kernel/arm/base/layout_transform.h" -#include "schema/ops_generated.h" namespace mindspore::kernel { -LayoutConvertor LayoutTransformFp16(schema::Format src_format, schema::Format dst_format); +LayoutConvertor LayoutTransformFp16(mindspore::Format src_format, mindspore::Format dst_format); } // namespace mindspore::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP16_LAYOUT_TRANSFORM_FP16_H_ diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/batch_to_space_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/batch_to_space_fp32.cc index 
fda740c2e55..b03b63d9701 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/batch_to_space_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/batch_to_space_fp32.cc @@ -45,7 +45,7 @@ int BatchToSpaceCPUKernel::Processinput() { } int BatchToSpaceCPUKernel::Init() { - MS_ASSERT(in_tensors_.at(0)->format() == schema::Format::Format_NHWC); + MS_ASSERT(in_tensors_.at(0)->format() == mindspore::NHWC); if (!InferShapeDone()) { return RET_OK; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/depth_to_space_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/depth_to_space_fp32.cc index 4e1fa2128a3..086a1a12356 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/depth_to_space_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/depth_to_space_fp32.cc @@ -41,7 +41,7 @@ int DepthToSpaceCPUKernel::Run() { const float *input_data = reinterpret_cast(input->data_c()); float *output_data = reinterpret_cast(output->data_c()); auto in_shape = input->shape(); - if (input->format() == schema::Format::Format_NHWC) { + if (input->format() == mindspore::NHWC) { DepthToSpaceForNHWC(input_data, output_data, in_shape.data(), param_); return RET_OK; } else { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth_fp32.cc index 2b0f9bfb424..35dbee63205 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth_fp32.cc @@ -45,7 +45,7 @@ int SpaceToDepthCPUKernel::Init() { } int SpaceToDepthCPUKernel::ReSize() { - if (in_tensors_.at(0)->format() != schema::Format::Format_NHWC) { + if (in_tensors_.at(0)->format() != mindspore::NHWC) { MS_LOG(ERROR) << "space_to_depth only support NHWC now!"; return RET_FORMAT_ERR; } @@ -90,7 +90,7 @@ int SpaceToDepthRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) int SpaceToDepthCPUKernel::Run() { input_ptr_ = 
reinterpret_cast(in_tensors_.at(0)->data_c()); output_ptr_ = reinterpret_cast(out_tensors_.at(0)->data_c()); - if (in_tensors_.at(0)->format() == schema::Format::Format_NHWC) { + if (in_tensors_.at(0)->format() == mindspore::NHWC) { auto ret = static_cast(this->context_) ->thread_pool_->ParallelLaunch(SpaceToDepthRun, this, thread_h_num_); if (ret != RET_OK) { diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/batch_to_space_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/batch_to_space_int8.cc index e9b374331f4..2ce38e98ff2 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/batch_to_space_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/batch_to_space_int8.cc @@ -36,7 +36,7 @@ BatchToSpaceInt8CPUKernel::~BatchToSpaceInt8CPUKernel() { } int BatchToSpaceInt8CPUKernel::Init() { - MS_ASSERT(in_tensors_.at(0)->format() == schema::Format::Format_NHWC); + MS_ASSERT(in_tensors_.at(0)->format() == mindspore::NHWC); in_quant_arg_ = reinterpret_cast(malloc(sizeof(QuantArg))); if (in_quant_arg_ == nullptr) { MS_LOG(ERROR) << "Malloc QuantArg for BatchToSpace int8 op failed!"; diff --git a/mindspore/lite/src/runtime/kernel/npu/convolution_base_npu.cc b/mindspore/lite/src/runtime/kernel/npu/convolution_base_npu.cc index 129e59278cd..4399e7f2b03 100644 --- a/mindspore/lite/src/runtime/kernel/npu/convolution_base_npu.cc +++ b/mindspore/lite/src/runtime/kernel/npu/convolution_base_npu.cc @@ -83,10 +83,10 @@ int ConvolutionBaseNPUKernel::InitBiasConst(const std::vector &i MS_LOG(ERROR) << "New bias const failed."; return RET_ERROR; } - inputs[BIAS_INDEX]->set_format(schema::Format_NCHW); + inputs[BIAS_INDEX]->set_format(mindspore::NCHW); auto bias_tensor = mindspore::lite::ConverterToNPUTensor(inputs[BIAS_INDEX]); bias_->set_attr_value(bias_tensor); - inputs[BIAS_INDEX]->set_format(schema::Format_NHWC); + inputs[BIAS_INDEX]->set_format(mindspore::NHWC); } return RET_OK; } diff --git a/mindspore/lite/src/runtime/kernel/npu/fullconnection_npu.cc 
b/mindspore/lite/src/runtime/kernel/npu/fullconnection_npu.cc index 56b46114bde..2bd60b090a1 100644 --- a/mindspore/lite/src/runtime/kernel/npu/fullconnection_npu.cc +++ b/mindspore/lite/src/runtime/kernel/npu/fullconnection_npu.cc @@ -62,10 +62,10 @@ int FullconnectionNPUKernel::SetNPUInputs(const std::vector &inp MS_LOG(ERROR) << "New weight const failed."; return RET_ERROR; } - inputs[1]->set_format(schema::Format_NCHW); + inputs[1]->set_format(mindspore::NCHW); auto weight_tensor = mindspore::lite::ConverterToNPUTensor(inputs[1]); weight_->set_attr_value(weight_tensor); - inputs[1]->set_format(schema::Format_NHWC); + inputs[1]->set_format(mindspore::NHWC); fc_->set_input_x2(*weight_).set_attr_transpose_x2(true); if (fc_param_->has_bias_) { diff --git a/mindspore/lite/src/runtime/kernel/npu/scale_npu.cc b/mindspore/lite/src/runtime/kernel/npu/scale_npu.cc index 7db0bbc537a..168aecf1d83 100644 --- a/mindspore/lite/src/runtime/kernel/npu/scale_npu.cc +++ b/mindspore/lite/src/runtime/kernel/npu/scale_npu.cc @@ -21,7 +21,6 @@ using mindspore::kernel::KERNEL_ARCH::kNPU; using mindspore::lite::KernelRegistrar; -using mindspore::schema::Format_NHWC; using mindspore::schema::PrimitiveType_ScaleFusion; namespace mindspore::kernel { @@ -37,7 +36,7 @@ int ScaleNPUKernel::IsSupport(const std::vector &inputs, const s if (scale_parameter_->axis_ < 0) { scale_parameter_->axis_ = scale_parameter_->axis_ + inputs[0]->shape().size(); } - if (inputs.size() > 1 && inputs[0]->shape().size() == DIMS_4D && inputs[0]->format() == schema::Format_NHWC) { + if (inputs.size() > 1 && inputs[0]->shape().size() == DIMS_4D && inputs[0]->format() == mindspore::NHWC) { // scale now only supports on axis 3 if (scale_parameter_->axis_ != 3) { MS_LOG(ERROR) << "Npu scale axis attr only support on channel, now is " << scale_parameter_->axis_; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/scale.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/scale.cc index 5739a909750..0306354d5a9 
100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/scale.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/scale.cc @@ -113,7 +113,7 @@ int ScaleOpenCLKernel::InitWeights() { << in_tensor->data_type(); return RET_ERROR; } - } else if (in_tensor->format() == schema::Format_NHWC && scale_tensor->format() == schema::Format_NHWC) { + } else if (in_tensor->format() == mindspore::NHWC && scale_tensor->format() == mindspore::NHWC) { if (scale_dtype == kNumberTypeFloat32 || scale_dtype == kNumberTypeFloat16) { auto image2d_info = GpuTensorInfo(scale_tensor); int pack_weight_size = image2d_info.ElementsC4Num; diff --git a/mindspore/lite/src/runtime/parallel_executor.cc b/mindspore/lite/src/runtime/parallel_executor.cc index ef0bf82aa3b..c2dabfa524e 100644 --- a/mindspore/lite/src/runtime/parallel_executor.cc +++ b/mindspore/lite/src/runtime/parallel_executor.cc @@ -52,7 +52,7 @@ int ParallelExecutor::Run(const std::vector &in_tensors, const std::ve MS_LOG(ERROR) << "Graph input tensor is nullptr"; return RET_ERROR; } - if (inTensor->format() != schema::Format::Format_NHWC) { + if (inTensor->format() != mindspore::NHWC) { MS_LOG(ERROR) << "Model input tensor should be NHWC"; return RET_ERROR; } diff --git a/mindspore/lite/src/tensor.cc b/mindspore/lite/src/tensor.cc index 8471e25da65..28cb2a9892d 100644 --- a/mindspore/lite/src/tensor.cc +++ b/mindspore/lite/src/tensor.cc @@ -14,12 +14,12 @@ * limitations under the License. 
*/ +#include "src/tensor.h" #include #include #include #include #include -#include "src/tensor.h" #include "securec/include/securec.h" #include "include/errorcode.h" @@ -28,7 +28,7 @@ namespace lite { namespace { constexpr int kMaxMallocSize = 1024 * 1024 * 300; } // namespace -Tensor::Tensor(const TypeId data_type, std::vector shape, const schema::Format &format, Category category) +Tensor::Tensor(const TypeId data_type, std::vector shape, const mindspore::Format &format, Category category) : data_type_(data_type), shape_(std::move(shape)), format_(format), category_(category) {} int Tensor::CopyTensorData(const Tensor &src_tensor, Tensor *dst_tensor) { @@ -95,24 +95,24 @@ int32_t Tensor::Batch() const { return RET_ERROR; } switch (this->format_) { - case schema::Format::Format_NHWC: - case schema::Format::Format_NHWC4: - case schema::Format::Format_NCHW: - case schema::Format::Format_NC4HW4: - case schema::Format::Format_KCHW: - case schema::Format::Format_KHWC: - case schema::Format::Format_NC: - case schema::Format::Format_NC4: + case mindspore::NHWC: + case mindspore::NHWC4: + case mindspore::NCHW: + case mindspore::NC4HW4: + case mindspore::KCHW: + case mindspore::KHWC: + case mindspore::NC: + case mindspore::NC4: return this->shape_[0]; - case schema::Format::Format_HWCK: - case schema::Format::Format_CHWK: + case mindspore::HWCK: + case mindspore::CHWK: return this->shape_[3]; - case schema::Format::Format_HWKC: + case mindspore::HWKC: return this->shape_[2]; - case schema::Format::Format_CKHW: + case mindspore::CKHW: return this->shape_[1]; default: - MS_LOG(ERROR) << "Unsupported format: " << EnumNameFormat(this->format_); + MS_LOG(ERROR) << "Unsupported format: " << EnumNameFormat(static_cast(this->format_)); return RET_ERROR; } } @@ -123,21 +123,21 @@ int32_t Tensor::Channel() const { return RET_ERROR; } switch (this->format_) { - case schema::Format::Format_NCHW: - case schema::Format::Format_KCHW: - case schema::Format::Format_NC: - case 
schema::Format::Format_NC4: + case mindspore::NCHW: + case mindspore::KCHW: + case mindspore::NC: + case mindspore::NC4: return this->shape_[1]; - case schema::Format::Format_HWCK: + case mindspore::HWCK: return this->shape_[2]; - case schema::Format::Format_HWKC: - case schema::Format::Format_NHWC: - case schema::Format::Format_NHWC4: - case schema::Format::Format_NC4HW4: - case schema::Format::Format_KHWC: + case mindspore::HWKC: + case mindspore::NHWC: + case mindspore::NHWC4: + case mindspore::NC4HW4: + case mindspore::KHWC: return this->shape_[3]; - case schema::Format::Format_CKHW: - case schema::Format::Format_CHWK: + case mindspore::CKHW: + case mindspore::CHWK: return this->shape_[0]; default: return RET_ERROR; @@ -150,23 +150,23 @@ int32_t Tensor::Height() const { return RET_ERROR; } switch (this->format_) { - case schema::Format::Format_NCHW: - case schema::Format::Format_KCHW: - case schema::Format::Format_CKHW: + case mindspore::NCHW: + case mindspore::KCHW: + case mindspore::CKHW: return this->shape_[2]; - case schema::Format::Format_NHWC: - case schema::Format::Format_NHWC4: - case schema::Format::Format_NC4HW4: - case schema::Format::Format_KHWC: - case schema::Format::Format_CHWK: + case mindspore::NHWC: + case mindspore::NHWC4: + case mindspore::NC4HW4: + case mindspore::KHWC: + case mindspore::CHWK: return this->shape_[1]; - case schema::Format::Format_HWCK: - case schema::Format::Format_HWKC: - case schema::Format::Format_HW: - case schema::Format::Format_HW4: + case mindspore::HWCK: + case mindspore::HWKC: + case mindspore::HW: + case mindspore::HW4: return this->shape_[0]; default: - MS_LOG(ERROR) << "Unsupported format: " << EnumNameFormat(this->format_); + MS_LOG(ERROR) << "Unsupported format: " << EnumNameFormat(static_cast(this->format_)); return RET_ERROR; } } @@ -177,20 +177,20 @@ int32_t Tensor::Width() const { return -1; } switch (this->format_) { - case schema::Format::Format_NCHW: - case schema::Format::Format_KCHW: - case 
schema::Format::Format_CKHW: + case mindspore::NCHW: + case mindspore::KCHW: + case mindspore::CKHW: return this->shape_[3]; - case schema::Format::Format_KHWC: - case schema::Format::Format_NHWC: - case schema::Format::Format_NHWC4: - case schema::Format::Format_NC4HW4: - case schema::Format::Format_CHWK: + case mindspore::KHWC: + case mindspore::NHWC: + case mindspore::NHWC4: + case mindspore::NC4HW4: + case mindspore::CHWK: return this->shape_[2]; - case schema::Format::Format_HWCK: - case schema::Format::Format_HWKC: - case schema::Format::Format_HW: - case schema::Format::Format_HW4: + case mindspore::HWCK: + case mindspore::HWKC: + case mindspore::HW: + case mindspore::HW4: return this->shape_[1]; default: return RET_ERROR; @@ -199,9 +199,7 @@ int32_t Tensor::Width() const { size_t Tensor::Size() const { size_t element_size = DataTypeSize(this->data_type_); - auto element_num = (format_ == schema::Format::Format_NC4HW4 || format_ == schema::Format::Format_NHWC4) - ? ElementsC4Num() - : ElementsNum(); + auto element_num = (format_ == mindspore::NC4HW4 || format_ == mindspore::NHWC4) ? 
ElementsC4Num() : ElementsNum(); if (element_num < 0) { MS_LOG(ERROR) << "Element number of tensor should large than 0 : " << element_num; return 0; @@ -241,7 +239,7 @@ int Tensor::DimensionSize(const size_t index) const { std::string Tensor::ToString() const { std::ostringstream oss; - oss << "schema::Format: " << EnumNameFormat(this->format_); + oss << "schema::Format: " << EnumNameFormat(static_cast(this->format_)); oss << " DataType: " << this->data_type_; oss << " Category: " << this->category_; oss << " Shape:"; diff --git a/mindspore/lite/src/tensor.h b/mindspore/lite/src/tensor.h index 334e670f19f..88069088388 100644 --- a/mindspore/lite/src/tensor.h +++ b/mindspore/lite/src/tensor.h @@ -24,6 +24,7 @@ #include #include #include "include/ms_tensor.h" +#include "ir/format.h" #include "src/runtime/inner_allocator.h" #include "src/common/log_adapter.h" @@ -56,7 +57,7 @@ class Tensor : public mindspore::tensor::MSTensor { }; Tensor() = default; - Tensor(TypeId data_type, std::vector shape, const schema::Format &format = schema::Format::Format_NHWC, + Tensor(TypeId data_type, std::vector shape, const mindspore::Format &format = mindspore::NHWC, Category category = VAR); Tensor(const Tensor &tensor) = delete; @@ -133,9 +134,9 @@ class Tensor : public mindspore::tensor::MSTensor { void set_category(Category category) { this->category_ = category; } - void set_format(schema::Format format) { this->format_ = format; } + void set_format(mindspore::Format format) override { this->format_ = format; } - schema::Format format() const { return this->format_; } + mindspore::Format format() const override { return this->format_; } virtual int ref_count() const { return ref_count_; } @@ -215,7 +216,7 @@ class Tensor : public mindspore::tensor::MSTensor { void *data_ = nullptr; TypeId data_type_; std::vector shape_; - schema::Format format_; + mindspore::Format format_; Category category_; std::atomic_int ref_count_ = {0}; size_t init_ref_count_ = 0; diff --git 
a/mindspore/lite/src/tensorlist.cc b/mindspore/lite/src/tensorlist.cc index ebf7dcf66a1..1c7c1243e8e 100644 --- a/mindspore/lite/src/tensorlist.cc +++ b/mindspore/lite/src/tensorlist.cc @@ -24,7 +24,7 @@ namespace mindspore::lite { TensorList::TensorList(std::vector shape, std::vector element_shape, Category category) - : Tensor(kObjectTypeTensorType, std::move(shape), schema::Format::Format_NHWC, category), + : Tensor(kObjectTypeTensorType, std::move(shape), mindspore::NHWC, category), element_shape_(std::move(element_shape)) {} TensorList::~TensorList() { diff --git a/mindspore/lite/src/train/train_export.cc b/mindspore/lite/src/train/train_export.cc index 12e3c6c4e1b..64e3190abf9 100644 --- a/mindspore/lite/src/train/train_export.cc +++ b/mindspore/lite/src/train/train_export.cc @@ -117,7 +117,7 @@ std::unique_ptr TrainExport::CreateTensor(const mindspore::lite auto tensorT = std::make_unique(); tensorT->nodeType = scTensor->nodeType(); tensorT->dims = tensor->shape(); - tensorT->format = tensor->format(); + tensorT->format = static_cast(tensor->format()); tensorT->name = tensor->tensor_name(); tensorT->refCount = 0; tensorT->offset = 0; diff --git a/mindspore/lite/src/train/train_session.cc b/mindspore/lite/src/train/train_session.cc index e520f457d65..17ad9b49802 100644 --- a/mindspore/lite/src/train/train_session.cc +++ b/mindspore/lite/src/train/train_session.cc @@ -142,7 +142,12 @@ int TrainSession::RunGraph(const KernelCallBack &before, const KernelCallBack &a } auto run_kernel = (train_mode_) ? 
train_kernels_ : inference_kernels_; - auto ret = CheckTensorsInvalid(inputs_); + auto ret = CheckGraphInputFormat(inputs_); + if (ret != RET_OK) { + MS_LOG(ERROR) << "model input's format may be changed, which should be NHWC."; + return ret; + } + ret = CheckTensorsInvalid(inputs_); if (ret != RET_OK) { MS_LOG(ERROR) << "CheckInputs failed"; return ret; } diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/constant_of_shape_fp32_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/constant_of_shape_fp32_test.cc index 7f82a80769a..b64ec1a14a5 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/constant_of_shape_fp32_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/constant_of_shape_fp32_test.cc @@ -27,7 +27,7 @@ class TestConstantOfShapeFp32 : public mindspore::CommonTest { int ConstantOfShapeTestInit(std::vector *inputs_, std::vector *outputs_, float *a_ptr, std::vector a_shape) { - auto in_t = new lite::Tensor(kNumberTypeInt32, a_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + auto in_t = new lite::Tensor(kNumberTypeInt32, a_shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); in_t->MallocData(); memcpy(in_t->MutableData(), a_ptr, sizeof(float) * in_t->ElementsNum()); inputs_->push_back(in_t); @@ -36,7 +36,7 @@ int ConstantOfShapeTestInit(std::vector *inputs_, std::vector
  • MallocData(); outputs_->push_back(out_t); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32_tests.cc index 7bb7793f2ae..fc0b9db8737 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32_tests.cc @@ -60,7 +60,7 @@ void InitConvDwCreator(std::vector *inputs, std::vectorset_data_type(kNumberTypeFloat32); - input->set_format(schema::Format_NHWC); + input->set_format(mindspore::NHWC); input->set_shape({conv_param->input_batch_, conv_param->input_h_, conv_param->input_w_, conv_param->input_channel_}); input->MallocData(); memcpy(input->MutableData(), input_data, input_size); @@ -91,7 +91,7 @@ void InitConvDwCreator(std::vector *inputs, std::vectorset_data_type(kNumberTypeFloat32); output->set_shape( {conv_param->output_batch_, conv_param->output_h_, conv_param->output_w_, conv_param->output_channel_}); - output->set_format(schema::Format_NHWC); + output->set_format(mindspore::NHWC); output->MallocData(); memset(output->MutableData(), 0, output->ElementsNum() * sizeof(float)); outputs->push_back(output); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/crop_fp32_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/crop_fp32_test.cc index 010181bc581..728f536d5cd 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/crop_fp32_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/crop_fp32_test.cc @@ -257,12 +257,12 @@ TEST_F(CropTestFp32, CropTest11) { std::vector out_shape = {1, 4, 2, 2}; std::vector inputs; std::vector outputs; - auto in_t = new lite::Tensor(kNumberTypeFloat, in_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + auto in_t = new lite::Tensor(kNumberTypeFloat, in_shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); 
in_t->MallocData(); memcpy(in_t->MutableData(), input, sizeof(float) * in_t->ElementsNum()); inputs.push_back(in_t); - auto out_t = new lite::Tensor(kNumberTypeFloat, out_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + auto out_t = new lite::Tensor(kNumberTypeFloat, out_shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); out_t->MallocData(); outputs.push_back(out_t); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/deconvolution_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/deconvolution_fp32_tests.cc index 7aca4d0a619..3fb5d3b82c8 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/deconvolution_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/deconvolution_fp32_tests.cc @@ -323,8 +323,7 @@ TEST_F(TestDeConvolutionFp32, PostConvFuncC8Test8_8) { int DeConvTestInit1(std::vector *inputs_, std::vector *outputs_, ConvParameter *conv_param, float **correct) { std::vector in_dims_nhwc = {1, 5, 7, 2}; - auto *in_t = - new lite::Tensor(kNumberTypeFloat, in_dims_nhwc, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + auto *in_t = new lite::Tensor(kNumberTypeFloat, in_dims_nhwc, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); in_t->MallocData(); float in_nchw[] = { 0.39451003, 0.15045597, 0.5367726, 0.62690735, 0.113554195, 0.5402554, 0.5522764, 0.044319753, 0.25721782, @@ -341,7 +340,7 @@ int DeConvTestInit1(std::vector *inputs_, std::vector weight_dims_nhwc = {2, 3, 3, 6}; auto *weight_t = - new lite::Tensor(kNumberTypeFloat, weight_dims_nhwc, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + new lite::Tensor(kNumberTypeFloat, weight_dims_nhwc, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); weight_t->MallocData(); float weight_nchw[] = { 0.061163727, -0.06261389, 0.07708351, -0.019354159, -0.3859104, -0.082844816, -0.21268463, -0.15746808, @@ -362,7 +361,7 @@ int DeConvTestInit1(std::vector *inputs_, std::vectorChannel(), 0, 0); 
inputs_->push_back(weight_t); - auto *bias_t = new lite::Tensor(kNumberTypeFloat, {6}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + auto *bias_t = new lite::Tensor(kNumberTypeFloat, {6}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); bias_t->MallocData(); float bias[] = {-0.19064677, -0.0034778118, 0.63741624, -1.0311537, -1.0288948, 0.71384084}; memcpy(bias_t->MutableData(), bias, sizeof(float) * 6); @@ -370,7 +369,7 @@ int DeConvTestInit1(std::vector *inputs_, std::vector output_nhwc_dims = {1, 9, 13, 6}; auto *out_t = - new lite::Tensor(kNumberTypeFloat, output_nhwc_dims, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + new lite::Tensor(kNumberTypeFloat, output_nhwc_dims, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); out_t->MallocData(); outputs_->push_back(out_t); @@ -499,7 +498,7 @@ TEST_F(TestDeConvolutionFp32, DeConvTest1) { int DeConvTestInit2(std::vector *inputs_, std::vector *outputs_, ConvParameter *conv_param, float **correct) { - auto *in_t = new lite::Tensor(kNumberTypeFloat, {1, 4, 2, 3}, schema::Format_NHWC, lite::Tensor::Category::VAR); + auto *in_t = new lite::Tensor(kNumberTypeFloat, {1, 4, 2, 3}, mindspore::NHWC, lite::Tensor::Category::VAR); in_t->MallocData(); float in[] = {7.7566547, 19.250782, 17.923292, 13.584222, 3.3293908, 9.734102, 18.83455, -1.5142503, -0.29382008, 18.686155, 0.087307654, 4.2010098, -2.2539594, 4.1795673, 13.142356, -3.5939367, @@ -508,7 +507,7 @@ int DeConvTestInit2(std::vector *inputs_, std::vectorpush_back(in_t); auto *weight_t = - new lite::Tensor(kNumberTypeFloat, {3, 3, 3, 2}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + new lite::Tensor(kNumberTypeFloat, {3, 3, 3, 2}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); weight_t->MallocData(); float weight[] = {-0.39557076, 0.15087655, 0.35216075, -0.20893791, 0.28683448, 0.08006268, 0.9830812, 0.27212173, 0.5171944, -0.0014505, 0.78694165, 0.25425306, 0.16605458, -0.06127124, @@ -522,7 +521,7 
@@ int DeConvTestInit2(std::vector *inputs_, std::vectorpush_back(weight_t); std::vector out_nhwc_dims = {1, 7, 3, 2}; - auto *out_t = new lite::Tensor(kNumberTypeFloat, out_nhwc_dims, schema::Format_NHWC, lite::Tensor::Category::VAR); + auto *out_t = new lite::Tensor(kNumberTypeFloat, out_nhwc_dims, mindspore::NHWC, lite::Tensor::Category::VAR); out_t->MallocData(); outputs_->push_back(out_t); @@ -568,7 +567,7 @@ TEST_F(TestDeConvolutionFp32, DeConvTest2) { int DeConvTestInit3(std::vector *inputs_, std::vector *outputs_, ConvParameter *conv_param, float **correct) { std::vector in_dims_nhwc = {1, 3, 3, 2}; - auto *in_t = new lite::Tensor(kNumberTypeFloat, in_dims_nhwc, schema::Format_NHWC, lite::Tensor::Category::VAR); + auto *in_t = new lite::Tensor(kNumberTypeFloat, in_dims_nhwc, mindspore::NHWC, lite::Tensor::Category::VAR); in_t->MallocData(); float in_nchw[] = {0.10411751, 0.24034509, 0.71456534, 0.75286126, 0.9778457, 0.21043599, 0.26498786, 0.6701024, 0.9744634, 0.49075702, 0.03877404, 0.48646277, @@ -579,7 +578,7 @@ int DeConvTestInit3(std::vector *inputs_, std::vector w_dims_nhwc = {2, 2, 2, 2}; auto *weight_t = - new lite::Tensor(kNumberTypeFloat, w_dims_nhwc, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + new lite::Tensor(kNumberTypeFloat, w_dims_nhwc, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); weight_t->MallocData(); float w_nchw[] = {-0.108016446, -0.44254777, 0.29249913, 0.18764605, 1.1250675, 0.29441583, -0.34362152, 0.7557833, 0.16503833, 0.2418737, -0.26612744, 0.5072577, @@ -589,7 +588,7 @@ int DeConvTestInit3(std::vector *inputs_, std::vectorpush_back(weight_t); std::vector out_dims_nhwc = {1, 9, 9, 2}; - auto *out_t = new lite::Tensor(kNumberTypeFloat, out_dims_nhwc, schema::Format_NC4HW4, lite::Tensor::Category::VAR); + auto *out_t = new lite::Tensor(kNumberTypeFloat, out_dims_nhwc, mindspore::NC4HW4, lite::Tensor::Category::VAR); out_t->MallocData(); outputs_->push_back(out_t); @@ -648,7 +647,7 @@ int 
DeConvTestInit4(std::vector *inputs_, std::vector in_nhwc_dims = {1, 300, 300, 30}; - auto *in_t = new lite::Tensor(kNumberTypeFloat, in_nhwc_dims, schema::Format_NHWC, lite::Tensor::Category::VAR); + auto *in_t = new lite::Tensor(kNumberTypeFloat, in_nhwc_dims, mindspore::NHWC, lite::Tensor::Category::VAR); in_t->MallocData(); std::string in_nhwc_path = "./deconv/deconv_fp32_nhwc_input1.bin"; auto in_nhwc = reinterpret_cast(mindspore::lite::ReadFile(in_nhwc_path.c_str(), &buffer_size)); @@ -657,7 +656,7 @@ int DeConvTestInit4(std::vector *inputs_, std::vector w_nhwc_dims = {30, 3, 3, 40}; auto *weight_t = - new lite::Tensor(kNumberTypeFloat, w_nhwc_dims, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + new lite::Tensor(kNumberTypeFloat, w_nhwc_dims, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); weight_t->MallocData(); std::string weight_path = "./deconv/deconv_fp32_nchw_weight1.bin"; auto weight_nchw = reinterpret_cast(mindspore::lite::ReadFile(weight_path.c_str(), &buffer_size)); @@ -665,7 +664,7 @@ int DeConvTestInit4(std::vector *inputs_, std::vectorChannel(), 0, 0); inputs_->push_back(weight_t); - auto *bias_t = new lite::Tensor(kNumberTypeFloat, {40}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + auto *bias_t = new lite::Tensor(kNumberTypeFloat, {40}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); bias_t->MallocData(); std::string bias_path = "./deconv/deconv_fp32_nchw_bias1.bin"; auto bias = mindspore::lite::ReadFile(bias_path.c_str(), &buffer_size); @@ -673,7 +672,7 @@ int DeConvTestInit4(std::vector *inputs_, std::vectorpush_back(bias_t); std::vector out_nhwc_dims = {1, 302, 302, 40}; - auto *out_t = new lite::Tensor(kNumberTypeFloat, out_nhwc_dims, schema::Format_NHWC, lite::Tensor::Category::VAR); + auto *out_t = new lite::Tensor(kNumberTypeFloat, out_nhwc_dims, mindspore::NHWC, lite::Tensor::Category::VAR); out_t->MallocData(); outputs_->push_back(out_t); diff --git 
a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/detection_post_process_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/detection_post_process_test.cc index eb3294451c7..aacc988b2c2 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/detection_post_process_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/detection_post_process_test.cc @@ -35,7 +35,7 @@ void DetectionPostProcessTestInit(std::vector *inputs_, std::vec reinterpret_cast(mindspore::lite::ReadFile(input_boxes_path.c_str(), &input_boxes_size)); auto *input_boxes = new lite::Tensor; input_boxes->set_data_type(kNumberTypeFloat32); - input_boxes->set_format(schema::Format_NHWC); + input_boxes->set_format(mindspore::NHWC); input_boxes->set_shape({1, 1917, 4}); input_boxes->MallocData(); memcpy(input_boxes->MutableData(), input_boxes_data, input_boxes_size); @@ -47,7 +47,7 @@ void DetectionPostProcessTestInit(std::vector *inputs_, std::vec reinterpret_cast(mindspore::lite::ReadFile(input_scores_path.c_str(), &input_scores_size)); auto *input_scores = new lite::Tensor; input_scores->set_data_type(kNumberTypeFloat32); - input_scores->set_format(schema::Format_NHWC); + input_scores->set_format(mindspore::NHWC); input_scores->set_shape({1, 1917, 91}); input_scores->MallocData(); memcpy(input_scores->MutableData(), input_scores_data, input_scores_size); @@ -63,7 +63,7 @@ void DetectionPostProcessTestInit(std::vector *inputs_, std::vec quant_arg.scale = 0.00645306; input_anchors->AddQuantParam(quant_arg); input_anchors->set_data_type(kNumberTypeUInt8); - input_anchors->set_format(schema::Format_NHWC); + input_anchors->set_format(mindspore::NHWC); input_anchors->set_shape({1917, 4}); input_anchors->MallocData(); memcpy(input_anchors->MutableData(), input_anchors_data, input_anchors_size); @@ -72,28 +72,28 @@ void DetectionPostProcessTestInit(std::vector *inputs_, std::vec auto *output_boxes = new lite::Tensor; output_boxes->set_data_type(kNumberTypeFloat32); 
output_boxes->set_shape({1, 10, 4}); - output_boxes->set_format(schema::Format_NHWC); + output_boxes->set_format(mindspore::NHWC); output_boxes->MallocData(); memset(output_boxes->MutableData(), 0, output_boxes->ElementsNum() * sizeof(float)); auto *output_classes = new lite::Tensor; output_classes->set_data_type(kNumberTypeFloat32); output_classes->set_shape({1, 10}); - output_classes->set_format(schema::Format_NHWC); + output_classes->set_format(mindspore::NHWC); output_classes->MallocData(); memset(output_classes->MutableData(), 0, output_classes->ElementsNum() * sizeof(float)); auto *output_scores = new lite::Tensor; output_scores->set_data_type(kNumberTypeFloat32); output_scores->set_shape({1, 10}); - output_scores->set_format(schema::Format_NHWC); + output_scores->set_format(mindspore::NHWC); output_scores->MallocData(); memset(output_scores->MutableData(), 0, output_scores->ElementsNum() * sizeof(float)); auto *output_num_det = new lite::Tensor; output_num_det->set_data_type(kNumberTypeFloat32); output_num_det->set_shape({1}); - output_num_det->set_format(schema::Format_NHWC); + output_num_det->set_format(mindspore::NHWC); output_num_det->MallocData(); memset(output_num_det->MutableData(), 0, output_num_det->ElementsNum() * sizeof(float)); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/elu_fp32_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/elu_fp32_test.cc index 0c20c6e2034..612695f4329 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/elu_fp32_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/elu_fp32_test.cc @@ -30,14 +30,13 @@ class TestEluFp32 : public mindspore::CommonTest { }; void EluTestInit(std::vector *inputs_, std::vector *outputs_, EluParameter *elu_param) { - Tensor *in_t_first = - new Tensor(kNumberTypeFloat32, {6, 2}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + Tensor *in_t_first = new Tensor(kNumberTypeFloat32, {6, 2}, mindspore::NHWC, 
lite::Tensor::Category::CONST_TENSOR); in_t_first->MallocData(); float in_first[] = {-1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11, 0}; memcpy(in_t_first->MutableData(), in_first, sizeof(float) * in_t_first->ElementsNum()); inputs_->push_back(in_t_first); - Tensor *outputs_t = new Tensor(kNumberTypeFloat32, {6, 2}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + Tensor *outputs_t = new Tensor(kNumberTypeFloat32, {6, 2}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); outputs_t->MallocData(); outputs_->push_back(outputs_t); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/embedding_lookup_fp32_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/embedding_lookup_fp32_test.cc index 2987e1f69b3..25f28deabdd 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/embedding_lookup_fp32_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/embedding_lookup_fp32_test.cc @@ -31,28 +31,25 @@ class TestEmbeddingLookupFp32 : public mindspore::CommonTest { void ElTestInit(std::vector *inputs_, std::vector *outputs_, EmbeddingLookupParameter *embedding_lookup_param) { - Tensor *in_t_first = - new Tensor(kNumberTypeFloat32, {6, 2}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + Tensor *in_t_first = new Tensor(kNumberTypeFloat32, {6, 2}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); in_t_first->MallocData(); float in_first[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}; memcpy(in_t_first->MutableData(), in_first, sizeof(float) * in_t_first->ElementsNum()); inputs_->push_back(in_t_first); - Tensor *in_t_second = - new Tensor(kNumberTypeFloat32, {4, 2}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + Tensor *in_t_second = new Tensor(kNumberTypeFloat32, {4, 2}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); in_t_second->MallocData(); float in_second[] = {1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8}; memcpy(in_t_second->MutableData(), in_second, sizeof(float) * 
in_t_second->ElementsNum()); inputs_->push_back(in_t_second); - Tensor *ids_t = new Tensor(kNumberTypeFloat32, {2, 3}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + Tensor *ids_t = new Tensor(kNumberTypeFloat32, {2, 3}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); ids_t->MallocData(); int ids[] = {1, 9, 2, 4, 6, 7}; memcpy(ids_t->MutableData(), ids, sizeof(int) * ids_t->ElementsNum()); inputs_->push_back(ids_t); - Tensor *outputs_t = - new Tensor(kNumberTypeInt32, {2, 3, 2}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + Tensor *outputs_t = new Tensor(kNumberTypeInt32, {2, 3, 2}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); outputs_t->MallocData(); outputs_->push_back(outputs_t); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/fullconnection_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/fullconnection_fp32_tests.cc index 97095ab1d5b..f2cf2c8570f 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/fullconnection_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/fullconnection_fp32_tests.cc @@ -34,14 +34,14 @@ class TestFcFp32 : public mindspore::CommonTest { int FcTestInit1(std::vector *inputs_, std::vector *outputs_, MatMulParameter *matmal_param, float **correct) { - auto *in_t = new Tensor(kNumberTypeFloat, {2, 2, 2, 2}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + auto *in_t = new Tensor(kNumberTypeFloat, {2, 2, 2, 2}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); in_t->MallocData(); float in[] = {-3.2366564, -4.7733846, -7.8329225, 16.146885, 5.060793, -6.1471, -1.7680453, -6.5721383, 17.87506, -5.1192183, 10.742863, 1.4536934, 19.693445, 19.45783, 5.063163, 0.5234792}; memcpy(in_t->MutableData(), in, sizeof(float) * in_t->ElementsNum()); inputs_->push_back(in_t); - auto *weight_t = new Tensor(kNumberTypeFloat, {3, 8}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + auto *weight_t = new 
Tensor(kNumberTypeFloat, {3, 8}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); weight_t->MallocData(); float weight[] = {-0.0024438887, 0.0006738146, -0.008169129, 0.0021510671, -0.012470592, -0.0053063435, 0.006050155, 0.008656233, 0.012911413, -0.0028635843, -0.00034080597, -0.0010622552, @@ -50,13 +50,13 @@ int FcTestInit1(std::vector *inputs_, std::vectorMutableData(), weight, sizeof(float) * weight_t->ElementsNum()); inputs_->push_back(weight_t); - auto *bias_t = new Tensor(kNumberTypeFloat, {3}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + auto *bias_t = new Tensor(kNumberTypeFloat, {3}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); bias_t->MallocData(); float bias[] = {1.6103756, -0.9872417, 0.546849}; memcpy(bias_t->MutableData(), bias, sizeof(float) * bias_t->ElementsNum()); inputs_->push_back(bias_t); - auto *out_t = new Tensor(kNumberTypeFloat, {2, 3}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + auto *out_t = new Tensor(kNumberTypeFloat, {2, 3}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); out_t->MallocData(); outputs_->push_back(out_t); @@ -95,28 +95,28 @@ int FcTestInit2(std::vector *inputs_, std::vectorMallocData(); std::string in_path = "./matmul/FcFp32_input1.bin"; auto in_data = mindspore::lite::ReadFile(in_path.c_str(), &buffer_size); memcpy(in_t->MutableData(), in_data, buffer_size); inputs_->push_back(in_t); - auto *weight_t = new Tensor(kNumberTypeFloat, {30, 80}, schema::Format_NCHW, lite::Tensor::Category::CONST_TENSOR); + auto *weight_t = new Tensor(kNumberTypeFloat, {30, 80}, mindspore::NCHW, lite::Tensor::Category::CONST_TENSOR); weight_t->MallocData(); std::string weight_path = "./matmul/FcFp32_weight1.bin"; auto w_data = mindspore::lite::ReadFile(weight_path.c_str(), &buffer_size); memcpy(weight_t->MutableData(), w_data, buffer_size); inputs_->push_back(weight_t); - auto *bias_t = new Tensor(kNumberTypeFloat, {30}, schema::Format_NCHW, 
lite::Tensor::Category::CONST_TENSOR); + auto *bias_t = new Tensor(kNumberTypeFloat, {30}, mindspore::NCHW, lite::Tensor::Category::CONST_TENSOR); bias_t->MallocData(); std::string bias_path = "./matmul/FcFp32_bias1.bin"; auto bias_data = mindspore::lite::ReadFile(bias_path.c_str(), &buffer_size); memcpy(bias_t->MutableData(), bias_data, buffer_size); inputs_->push_back(bias_t); - auto *out_t = new Tensor(kNumberTypeFloat, {20, 30}, schema::Format_NCHW, lite::Tensor::Category::CONST_TENSOR); + auto *out_t = new Tensor(kNumberTypeFloat, {20, 30}, mindspore::NCHW, lite::Tensor::Category::CONST_TENSOR); out_t->MallocData(); outputs_->push_back(out_t); @@ -153,13 +153,13 @@ TEST_F(TestFcFp32, FcTest2) { void FcTestInit3(std::vector *inputs_, std::vector *outputs_, MatMulParameter *matmal_param, float **correct) { - auto *in_t = new Tensor(kNumberTypeFloat, {1, 1, 1, 20}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + auto *in_t = new Tensor(kNumberTypeFloat, {1, 1, 1, 20}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); in_t->MallocData(); float in[] = {1, 0, 3, 0, 4, 5, 2, 5, 2, 5, 1, 5, 0, 1, 2, 0, 2, 1, 0, 5}; memcpy(in_t->MutableData(), in, sizeof(float) * in_t->ElementsNum()); inputs_->push_back(in_t); - auto *weight_t = new Tensor(kNumberTypeFloat, {16, 20}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + auto *weight_t = new Tensor(kNumberTypeFloat, {16, 20}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); weight_t->MallocData(); float weight[] = {0, 5, 5, 3, 0, 5, 3, 1, 0, 1, 3, 0, 5, 5, 2, 4, 0, 1, 1, 2, 3, 0, 5, 5, 4, 4, 1, 4, 1, 1, 5, 3, 3, 1, 0, 3, 1, 2, 4, 5, 3, 4, 4, 0, 3, 5, 0, 3, 4, 1, 0, 1, 3, 4, 0, 5, 2, 5, 0, 4, 2, 2, 2, 2, @@ -174,7 +174,7 @@ void FcTestInit3(std::vector *inputs_, std::vectorMutableData(), weight, sizeof(float) * weight_t->ElementsNum()); inputs_->push_back(weight_t); - auto *out_t = new Tensor(kNumberTypeFloat, {1, 16}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + auto 
*out_t = new Tensor(kNumberTypeFloat, {1, 16}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); out_t->MallocData(); outputs_->push_back(out_t); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/l2norm_fp32_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/l2norm_fp32_test.cc index 40d22f6c4b5..f2ed0cfacc4 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/l2norm_fp32_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/l2norm_fp32_test.cc @@ -18,7 +18,6 @@ #include "mindspore/lite/src/runtime/kernel/arm/fp32/l2_norm_fp32.h" #include "src/kernel_registry.h" #include "src/lite_kernel.h" -using mindspore::schema::Format_NHWC; namespace mindspore { class TestL2NormFp32 : public mindspore::CommonTest { @@ -49,7 +48,7 @@ void TestL2NormFp32::TearDown() { void TestL2NormFp32::Init(const std::vector &input_shape, const std::vector &output_shape, float *input_data, float *output_data, const int axis_num, ActType activation_type, const int thread_num) { in_tensor_.set_data_type(kNumberTypeFloat32); - in_tensor_.set_format(Format_NHWC); + in_tensor_.set_format(mindspore::NHWC); in_tensor_.set_shape(input_shape); out_tensor_.set_data_type(kNumberTypeFloat32); out_tensor_.set_shape(output_shape); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/lstm_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/lstm_fp32_tests.cc index 506d332cf96..3f82c3d5450 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/lstm_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/lstm_fp32_tests.cc @@ -51,7 +51,7 @@ void InitLstmForwardCreator(std::vector *inputs, std::vectorset_data_type(kNumberTypeFloat32); - weight_i->set_format(schema::Format_NHWC); + weight_i->set_format(mindspore::NHWC); weight_i->set_shape({1, lstm_param->hidden_size_ * 4, lstm_param->input_size_}); weight_i->MallocData(); memcpy(weight_i->MutableData(), weight_i_data.data(), weight_i_data.size() * 
sizeof(float)); @@ -64,7 +64,7 @@ void InitLstmForwardCreator(std::vector *inputs, std::vectorset_data_type(kNumberTypeFloat32); - weight_h->set_format(schema::Format_NHWC); + weight_h->set_format(mindspore::NHWC); weight_h->set_shape({1, lstm_param->hidden_size_ * 4, lstm_param->hidden_size_}); weight_h->MallocData(); memcpy(weight_h->MutableData(), weight_h_data.data(), weight_h_data.size() * sizeof(float)); @@ -76,7 +76,7 @@ void InitLstmForwardCreator(std::vector *inputs, std::vectorset_data_type(kNumberTypeFloat32); - bias->set_format(schema::Format_NHWC); + bias->set_format(mindspore::NHWC); bias->set_shape({1, lstm_param->hidden_size_ * 4 * 2}); bias->MallocData(); memcpy(bias->MutableData(), bias_data.data(), bias_data.size() * sizeof(float)); @@ -85,7 +85,7 @@ void InitLstmForwardCreator(std::vector *inputs, std::vector state_data = {0, 0, 0}; auto *state = new lite::Tensor; state->set_data_type(kNumberTypeFloat32); - state->set_format(schema::Format_NHWC); + state->set_format(mindspore::NHWC); state->set_shape({1, lstm_param->batch_, lstm_param->hidden_size_}); state->MallocData(); memcpy(state->MutableData(), state_data.data(), state_data.size() * sizeof(float)); @@ -101,21 +101,21 @@ void InitLstmForwardCreator(std::vector *inputs, std::vectorset_data_type(kNumberTypeFloat32); output->set_shape({lstm_param->seq_len_, lstm_param->batch_, lstm_param->hidden_size_}); - output->set_format(schema::Format_NHWC); + output->set_format(mindspore::NHWC); output->MallocData(); memset(output->MutableData(), 0, output->ElementsNum() * sizeof(float)); auto *cell_state = new lite::Tensor; cell_state->set_data_type(kNumberTypeFloat32); cell_state->set_shape({1, lstm_param->batch_, lstm_param->hidden_size_}); - cell_state->set_format(schema::Format_NHWC); + cell_state->set_format(mindspore::NHWC); cell_state->MallocData(); memset(cell_state->MutableData(), 0, cell_state->ElementsNum() * sizeof(float)); auto *hidden_state = new lite::Tensor; 
hidden_state->set_data_type(kNumberTypeFloat32); hidden_state->set_shape({1, lstm_param->batch_, lstm_param->hidden_size_}); - hidden_state->set_format(schema::Format_NHWC); + hidden_state->set_format(mindspore::NHWC); hidden_state->MallocData(); memset(hidden_state->MutableData(), 0, hidden_state->ElementsNum() * sizeof(float)); @@ -203,7 +203,7 @@ void InitLstmBackwardCreator(std::vector *inputs, std::vector
  • set_data_type(kNumberTypeFloat32); - weight_i->set_format(schema::Format_NHWC); + weight_i->set_format(mindspore::NHWC); weight_i->set_shape({2, lstm_param->hidden_size_ * 4, lstm_param->input_size_}); weight_i->MallocData(); memcpy(weight_i->MutableData(), weight_i_data.data(), weight_i_data.size() * sizeof(float)); @@ -221,7 +221,7 @@ void InitLstmBackwardCreator(std::vector *inputs, std::vector
  • set_data_type(kNumberTypeFloat32); - weight_h->set_format(schema::Format_NHWC); + weight_h->set_format(mindspore::NHWC); weight_h->set_shape({2, lstm_param->hidden_size_ * 4, lstm_param->hidden_size_}); weight_h->MallocData(); memcpy(weight_h->MutableData(), weight_h_data.data(), weight_h_data.size() * sizeof(float)); @@ -236,7 +236,7 @@ void InitLstmBackwardCreator(std::vector *inputs, std::vector
  • set_data_type(kNumberTypeFloat32); - bias->set_format(schema::Format_NHWC); + bias->set_format(mindspore::NHWC); bias->set_shape({2, lstm_param->hidden_size_ * 4 * 2}); bias->MallocData(); memcpy(bias->MutableData(), bias_data.data(), bias_data.size() * sizeof(float)); @@ -245,7 +245,7 @@ void InitLstmBackwardCreator(std::vector *inputs, std::vector
  • state_data = {0, 0, 0, 0, 0, 0}; auto *state = new lite::Tensor; state->set_data_type(kNumberTypeFloat32); - state->set_format(schema::Format_NHWC); + state->set_format(mindspore::NHWC); state->set_shape({2, lstm_param->batch_, lstm_param->hidden_size_}); state->MallocData(); memcpy(state->MutableData(), state_data.data(), state_data.size() * sizeof(float)); @@ -261,21 +261,21 @@ void InitLstmBackwardCreator(std::vector *inputs, std::vector
  • set_data_type(kNumberTypeFloat32); output->set_shape({lstm_param->seq_len_, 2, lstm_param->batch_, lstm_param->hidden_size_}); - output->set_format(schema::Format_NHWC); + output->set_format(mindspore::NHWC); output->MallocData(); memset(output->MutableData(), 0, output->ElementsNum() * sizeof(float)); auto *cell_state = new lite::Tensor; cell_state->set_data_type(kNumberTypeFloat32); cell_state->set_shape({2, lstm_param->batch_, lstm_param->hidden_size_}); - cell_state->set_format(schema::Format_NHWC); + cell_state->set_format(mindspore::NHWC); cell_state->MallocData(); memset(cell_state->MutableData(), 0, cell_state->ElementsNum() * sizeof(float)); auto *hidden_state = new lite::Tensor; hidden_state->set_data_type(kNumberTypeFloat32); hidden_state->set_shape({2, lstm_param->batch_, lstm_param->hidden_size_}); - hidden_state->set_format(schema::Format_NHWC); + hidden_state->set_format(mindspore::NHWC); hidden_state->MallocData(); memset(hidden_state->MutableData(), 0, hidden_state->ElementsNum() * sizeof(float)); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/matmul_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/matmul_fp32_tests.cc index 73f9d42b673..1876fdfcf8e 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/matmul_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/matmul_fp32_tests.cc @@ -70,18 +70,17 @@ TEST_F(TestMatMulFp32, Row2Col8Test2) { int MMTestInit(std::vector *inputs_, std::vector *outputs_, float *a_ptr, float *b_ptr, const std::vector &a_shape, const std::vector &b_shape, const std::vector &c_shape) { - auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); in_t->MallocData(); memcpy(in_t->MutableData(), a_ptr, sizeof(float) * in_t->ElementsNum()); inputs_->push_back(in_t); - auto weight_t = - new 
lite::Tensor(kNumberTypeFloat, b_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + auto weight_t = new lite::Tensor(kNumberTypeFloat, b_shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); weight_t->MallocData(); memcpy(weight_t->MutableData(), b_ptr, sizeof(float) * weight_t->ElementsNum()); inputs_->push_back(weight_t); - auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); out_t->MallocData(); outputs_->push_back(out_t); @@ -91,24 +90,22 @@ int MMTestInit(std::vector *inputs_, std::vector int MMTestInit2(std::vector *inputs_, std::vector *outputs_, float *a_ptr, float *b_ptr, float *bias_ptr, const std::vector &a_shape, const std::vector &b_shape, const std::vector &bias_shape, const std::vector &c_shape) { - auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); in_t->MallocData(); memcpy(in_t->MutableData(), a_ptr, sizeof(float) * in_t->ElementsNum()); inputs_->push_back(in_t); - auto weight_t = - new lite::Tensor(kNumberTypeFloat, b_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + auto weight_t = new lite::Tensor(kNumberTypeFloat, b_shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); weight_t->MallocData(); memcpy(weight_t->MutableData(), b_ptr, sizeof(float) * weight_t->ElementsNum()); inputs_->push_back(weight_t); - auto bias_t = - new lite::Tensor(kNumberTypeFloat, bias_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + auto bias_t = new lite::Tensor(kNumberTypeFloat, bias_shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); bias_t->MallocData(); memcpy(bias_t->MutableData(), bias_ptr, sizeof(float) * 
bias_t->ElementsNum()); inputs_->push_back(bias_t); - auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); out_t->MallocData(); outputs_->push_back(out_t); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/non_max_suppression_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/non_max_suppression_fp32_tests.cc index ff755e844ad..e833e77cc8a 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/non_max_suppression_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/non_max_suppression_fp32_tests.cc @@ -19,7 +19,6 @@ #include "mindspore/lite/src/runtime/kernel/arm/fp32/non_max_suppression_fp32.h" #include "src/kernel_registry.h" #include "src/lite_kernel.h" -using mindspore::schema::Format_NHWC; namespace mindspore { class TestNMSFp32 : public mindspore::CommonTest { @@ -63,12 +62,12 @@ void TestNMSFp32::Init(const std::vector &box_tensor_shape, float *box_data const std::vector &score_tensor_shape, float *score_data, int32_t max_output, float iou_threshold, float score_threshold, int center_box_point) { box_tensor_.set_data_type(kNumberTypeFloat32); - box_tensor_.set_format(Format_NHWC); + box_tensor_.set_format(mindspore::NHWC); box_tensor_.set_shape(box_tensor_shape); box_tensor_.set_data(box_data); score_tensor_.set_data_type(kNumberTypeFloat32); - score_tensor_.set_format(Format_NHWC); + score_tensor_.set_format(mindspore::NHWC); score_tensor_.set_shape(score_tensor_shape); score_tensor_.set_data(score_data); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/pad_fp32_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/pad_fp32_test.cc index f148f2e9714..efe4472ea76 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/pad_fp32_test.cc +++ 
b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/pad_fp32_test.cc @@ -21,7 +21,6 @@ #include "mindspore/lite/src/kernel_registry.h" #include "schema/ops_generated.h" -using mindspore::schema::Format_NHWC; using mindspore::schema::PaddingMode; using mindspore::schema::PaddingMode_CONSTANT; using mindspore::schema::PaddingMode_REFLECT; @@ -61,7 +60,7 @@ void TestPadFp32::Prepare(const std::vector &input_shape, const std::vector float *output_data, PaddingMode mode, int *paddings, int padding_length, float constant_value, const int thread_num) { in_tensor_.set_data_type(kNumberTypeFloat32); - in_tensor_.set_format(Format_NHWC); + in_tensor_.set_format(mindspore::NHWC); in_tensor_.set_shape(input_shape); out_tensor_.set_data_type(kNumberTypeFloat32); out_tensor_.set_shape(output_shape); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/power_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/power_fp32_tests.cc index f52d8176225..83083c82e56 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/power_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/power_fp32_tests.cc @@ -28,18 +28,17 @@ class TestPowerFp32 : public mindspore::CommonTest { int PowerTestInit(std::vector *inputs_, std::vector *outputs_, float *a_ptr, float *b_ptr, const std::vector &a_shape, const std::vector &b_shape, const std::vector &c_shape) { - auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); in_t->MallocData(); memcpy(in_t->MutableData(), a_ptr, sizeof(float) * in_t->ElementsNum()); inputs_->push_back(in_t); - auto weight_t = - new lite::Tensor(kNumberTypeFloat, b_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + auto weight_t = new lite::Tensor(kNumberTypeFloat, b_shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); 
weight_t->MallocData(); memcpy(weight_t->MutableData(), b_ptr, sizeof(float) * weight_t->ElementsNum()); inputs_->push_back(weight_t); - auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); out_t->MallocData(); outputs_->push_back(out_t); @@ -48,12 +47,12 @@ int PowerTestInit(std::vector *inputs_, std::vector *inputs_, std::vector *outputs_, float *a_ptr, const std::vector &a_shape, const std::vector &c_shape) { - auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); in_t->MallocData(); memcpy(in_t->MutableData(), a_ptr, sizeof(float) * in_t->ElementsNum()); inputs_->push_back(in_t); - auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); out_t->MallocData(); outputs_->push_back(out_t); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/resize_bilinear_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/resize_bilinear_fp32_tests.cc index 73893d1fbb5..ab0817da7d2 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/resize_bilinear_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/resize_bilinear_fp32_tests.cc @@ -20,7 +20,6 @@ #include "mindspore/lite/src/tensor.h" #include "nnacl/resize_parameter.h" #include "schema/ops_generated.h" -using mindspore::schema::Format_NHWC; namespace mindspore { @@ -54,7 +53,7 @@ void TestResizeBilinearFp32::Prepare(const std::vector &input_shape, const float *input_data, float *output_data, const bool align_corners, const int thread_num) { 
in_tensor_.set_data_type(kNumberTypeFloat32); - in_tensor_.set_format(Format_NHWC); + in_tensor_.set_format(mindspore::NHWC); in_tensor_.set_shape(input_shape); out_tensor_.set_data_type(kNumberTypeFloat32); out_tensor_.set_shape(output_shape); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/roi_pooling_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/roi_pooling_fp32_tests.cc index 45f0495e968..f6e6d580779 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/roi_pooling_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/roi_pooling_fp32_tests.cc @@ -28,17 +28,17 @@ class TestROIPoolingFp32 : public mindspore::CommonTest { int ROIPoolingTestInit(std::vector *inputs_, std::vector *outputs_, float *a_ptr, float *b_ptr, const std::vector &a_shape, const std::vector &b_shape, const std::vector &c_shape) { - auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); in_t->MallocData(); memcpy(in_t->MutableData(), a_ptr, sizeof(float) * in_t->ElementsNum()); inputs_->push_back(in_t); - auto roi_t = new lite::Tensor(kNumberTypeFloat, b_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + auto roi_t = new lite::Tensor(kNumberTypeFloat, b_shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); roi_t->MallocData(); memcpy(roi_t->MutableData(), b_ptr, sizeof(float) * roi_t->ElementsNum()); inputs_->push_back(roi_t); - auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); out_t->MallocData(); outputs_->push_back(out_t); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/scale_fp32_tests.cc 
b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/scale_fp32_tests.cc index 6415f5f4864..48909768cc0 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/scale_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/scale_fp32_tests.cc @@ -26,7 +26,6 @@ using mindspore::schema::ActivationType; using mindspore::schema::ActivationType_NO_ACTIVATION; using mindspore::schema::ActivationType_RELU; using mindspore::schema::ActivationType_RELU6; -using mindspore::schema::Format_NHWC; namespace mindspore { class TestScaleFp32 : public mindspore::CommonTest { @@ -66,13 +65,13 @@ void TestScaleFp32::Prepare(const std::vector &input_shape, const std::vect float *input_data, float *scale_data, float *offset_data, float *output_data, int axis, ActivationType act_type, const int thread_num) { in_tensor_.set_data_type(kNumberTypeFloat32); - in_tensor_.set_format(Format_NHWC); + in_tensor_.set_format(mindspore::NHWC); in_tensor_.set_shape(input_shape); scale_tensor_.set_data_type(kNumberTypeFloat32); - scale_tensor_.set_format(Format_NHWC); + scale_tensor_.set_format(mindspore::NHWC); scale_tensor_.set_shape(scale_shape); offset_tensor_.set_data_type(kNumberTypeFloat32); - offset_tensor_.set_format(Format_NHWC); + offset_tensor_.set_format(mindspore::NHWC); offset_tensor_.set_shape(offset_shape); out_tensor_.set_data_type(kNumberTypeFloat32); out_tensor_.set_shape(output_shape); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/skip_gram_fp32.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/skip_gram_fp32.cc index 4e4cdf18ffc..200b9f49f19 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/skip_gram_fp32.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/skip_gram_fp32.cc @@ -33,14 +33,14 @@ class TestSkipGramFp32 : public mindspore::CommonTest { void SkipGramTestInit(std::vector *inputs_, std::vector *outputs_, SkipGramParameter *skip_gram_param) { - Tensor *in_t_first = new Tensor(kObjectTypeString, {}, 
schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + Tensor *in_t_first = new Tensor(kObjectTypeString, {}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); char sentence[] = "The quick brown fox jumps over the lazy dog"; std::vector str; str.push_back({43, sentence}); mindspore::lite::WriteStringsToTensor(in_t_first, str); inputs_->push_back(in_t_first); - Tensor *output = new Tensor(kObjectTypeString, {}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + Tensor *output = new Tensor(kObjectTypeString, {}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); outputs_->push_back(output); skip_gram_param->ngram_size = 3; diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_depth_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_depth_fp32_tests.cc index a611a90559e..62b04a3e4bd 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_depth_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_depth_fp32_tests.cc @@ -54,7 +54,7 @@ TEST_F(SpaceToDepthTestFp32, SpaceToDepthTest2) { lite::Tensor input_tensor; input_tensor.set_data(input.data()); input_tensor.set_shape(in_shape); - input_tensor.set_format(schema::Format_NHWC); + input_tensor.set_format(mindspore::NHWC); input_tensor.set_data_type(kNumberTypeFloat32); std::vector inputs_tensor; inputs_tensor.push_back(&input_tensor); @@ -66,7 +66,7 @@ TEST_F(SpaceToDepthTestFp32, SpaceToDepthTest2) { lite::Tensor output_tensor; output_tensor.set_data(output.data()); output_tensor.set_shape(out_shape); - output_tensor.set_format(schema::Format_NHWC); + output_tensor.set_format(mindspore::NHWC); output_tensor.set_data_type(kNumberTypeFloat32); std::vector outputs_tensor; outputs_tensor.push_back(&output_tensor); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/transpose_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/transpose_fp32_tests.cc index 84ae8b09a1b..5fc3101ced2 
100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/transpose_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/transpose_fp32_tests.cc @@ -226,7 +226,7 @@ TEST_F(TestTransposeFp32, TransposeFp32_test5) { /* 1x2x3x2x2 */ lite::Tensor input_tensor; input_tensor.set_data(input.data()); input_tensor.set_shape(input_shape); - input_tensor.set_format(schema::Format_NHWC); + input_tensor.set_format(mindspore::NHWC); input_tensor.set_data_type(kNumberTypeFloat32); lite::Tensor perm_tensor(kNumberTypeInt32, {5}); perm_tensor.set_data(perm); @@ -234,7 +234,7 @@ TEST_F(TestTransposeFp32, TransposeFp32_test5) { /* 1x2x3x2x2 */ lite::Tensor output_tensor; output_tensor.set_data(output.data()); output_tensor.set_shape(output_shape); - output_tensor.set_format(schema::Format_NHWC); + output_tensor.set_format(mindspore::NHWC); output_tensor.set_data_type(kNumberTypeFloat32); std::vector outputs_tensor; outputs_tensor.emplace_back(&output_tensor); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/conv_1x1_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/conv_1x1_int8_tests.cc index aef8177dd3d..ca4a139cfd3 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/conv_1x1_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/conv_1x1_int8_tests.cc @@ -71,7 +71,7 @@ TEST_F(TestConv1x1Int8, Input1x1PrePack2) { int Conv1x1Int8TestInit1_perchannel(std::vector *inputs_, std::vector *outputs_, ConvParameter *conv_param, int8_t **correct) { - Tensor *in_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 4}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + Tensor *in_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 4}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); auto in_quant_arg = new mindspore::lite::QuantArg(); in_quant_arg->zeroPoint = -42, in_quant_arg->scale = 0.117647; in_t->AddQuantParam(*in_quant_arg); @@ -81,8 +81,7 @@ int Conv1x1Int8TestInit1_perchannel(std::vector *inputs_, std::v 
memcpy(in_t->MutableData(), in, in_t->ElementsNum() * sizeof(int8_t)); inputs_->push_back(in_t); - Tensor *weight_t = - new Tensor(kNumberTypeInt8, {3, 1, 1, 4}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + Tensor *weight_t = new Tensor(kNumberTypeInt8, {3, 1, 1, 4}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); weight_t->MallocData(); auto weight_quant_arg1 = new mindspore::lite::QuantArg(); weight_quant_arg1->zeroPoint = 66, weight_quant_arg1->scale = 0.96439215686275; @@ -97,7 +96,7 @@ int Conv1x1Int8TestInit1_perchannel(std::vector *inputs_, std::v memcpy(weight_t->MutableData(), weight, weight_t->ElementsNum() * sizeof(int8_t)); inputs_->push_back(weight_t); - Tensor *out_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 3}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + Tensor *out_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 3}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); out_t->MallocData(); auto output_quant_arg = new mindspore::lite::QuantArg(); output_quant_arg->zeroPoint = 7, output_quant_arg->scale = 0.294321233; @@ -141,7 +140,7 @@ TEST_F(TestConv1x1Int8, Conv1x1TestPerChannel) { int Conv1x1Int8TestInit1(std::vector *inputs_, std::vector *outputs_, ConvParameter *conv_param, int8_t **correct) { - Tensor *in_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 4}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + Tensor *in_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 4}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); auto in_quant_arg = new mindspore::lite::QuantArg(); in_quant_arg->zeroPoint = -42, in_quant_arg->scale = 0.117647; in_t->AddQuantParam(*in_quant_arg); @@ -153,8 +152,7 @@ int Conv1x1Int8TestInit1(std::vector *inputs_, std::vector(in_t->MutableData())); inputs_->push_back(in_t); - Tensor *weight_t = - new Tensor(kNumberTypeInt8, {3, 1, 1, 4}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + Tensor *weight_t = new Tensor(kNumberTypeInt8, {3, 1, 1, 4}, mindspore::NHWC, 
lite::Tensor::Category::CONST_TENSOR); auto weight_quant_arg = new mindspore::lite::QuantArg(); weight_quant_arg->zeroPoint = 66, weight_quant_arg->scale = 0.036439215686275; weight_t->AddQuantParam(*weight_quant_arg); @@ -165,7 +163,7 @@ int Conv1x1Int8TestInit1(std::vector *inputs_, std::vector(weight_t->MutableData())); inputs_->push_back(weight_t); - Tensor *out_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 3}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + Tensor *out_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 3}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); out_t->MallocData(); auto output_quant_arg = new mindspore::lite::QuantArg(); output_quant_arg->zeroPoint = 7, output_quant_arg->scale = 0.234321233; @@ -212,7 +210,7 @@ TEST_F(TestConv1x1Int8, Conv1x1Int8Test1) { int Conv1x1Int8TestInit2(std::vector *inputs_, std::vector *outputs_, ConvParameter *conv_param, int8_t **correct) { size_t buffer_size; - Tensor *in_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 4}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + Tensor *in_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 4}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); auto in_quant_arg = new mindspore::lite::QuantArg(); in_quant_arg->zeroPoint = -42, in_quant_arg->scale = 0.117647; in_t->AddQuantParam(*in_quant_arg); @@ -223,8 +221,7 @@ int Conv1x1Int8TestInit2(std::vector *inputs_, std::vectorpush_back(in_t); delete[] input; - Tensor *weight_t = - new Tensor(kNumberTypeInt8, {3, 1, 1, 4}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + Tensor *weight_t = new Tensor(kNumberTypeInt8, {3, 1, 1, 4}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); auto weight_quant_arg = new mindspore::lite::QuantArg(); weight_quant_arg->zeroPoint = 66, weight_quant_arg->scale = 0.036439215686275; weight_t->AddQuantParam(*weight_quant_arg); @@ -235,7 +232,7 @@ int Conv1x1Int8TestInit2(std::vector *inputs_, std::vectorpush_back(weight_t); delete[] weight; - Tensor *bias_t = 
new Tensor(kNumberTypeInt32, {4}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + Tensor *bias_t = new Tensor(kNumberTypeInt32, {4}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); weight_t->MallocData(); std::string bias_path = "./bias"; auto bias = mindspore::lite::ReadFile(bias_path.c_str(), &buffer_size); @@ -243,7 +240,7 @@ int Conv1x1Int8TestInit2(std::vector *inputs_, std::vectorpush_back(bias_t); delete[] bias; - Tensor *out_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 3}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + Tensor *out_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 3}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); out_t->MallocData(); auto output_quant_arg = new mindspore::lite::QuantArg(); output_quant_arg->zeroPoint = 7, output_quant_arg->scale = 0.234321233; diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/deconv_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/deconv_int8_tests.cc index ddba197bdc7..7928abe3d22 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/deconv_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/deconv_int8_tests.cc @@ -30,7 +30,6 @@ using mindspore::lite::DeviceType; namespace mindspore { using mindspore::lite::QuantArg; using mindspore::lite::Tensor; -using mindspore::schema::Format_NHWC; class TestDeconvInt8 : public mindspore::CommonTest { public: TestDeconvInt8() {} @@ -274,7 +273,7 @@ int DeConvInt8TestInit1(std::vector *inputs_, std::vectorMallocData(); int8_t in[] = {6, 43, 38, 24, -8, 12, 41, -24, -20, 41, -19, -6, -26, -6, 23, -31, 34, 45, 8, 45, -39, -27, -48, 12}; memcpy(in_t->MutableData(), in, sizeof(int8_t) * in_t->ElementsNum()); @@ -283,7 +282,7 @@ int DeConvInt8TestInit1(std::vector *inputs_, std::vectorAddQuantParam(*in_quant_arg); inputs_->push_back(in_t); - auto *weight_t = new Tensor(kNumberTypeInt8, {3, 3, 3, 2}, Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + auto *weight_t = new 
Tensor(kNumberTypeInt8, {3, 3, 3, 2}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); weight_t->MallocData(); int8_t weight[] = {66, 89, 98, 74, 95, 86, 125, 95, 105, 83, 116, 94, 90, 80, 86, 59, 72, 92, 64, 76, 92, 80, 90, 87, 106, 55, 105, 60, 75, 53, 81, 81, 98, 81, 86, 59, @@ -294,7 +293,7 @@ int DeConvInt8TestInit1(std::vector *inputs_, std::vectorAddQuantParam(*w_quant_arg); inputs_->push_back(weight_t); - auto *out_t = new Tensor(kNumberTypeInt8, {1, 7, 3, 2}, Format_NHWC, lite::Tensor::Category::VAR); + auto *out_t = new Tensor(kNumberTypeInt8, {1, 7, 3, 2}, mindspore::NHWC, lite::Tensor::Category::VAR); out_t->MallocData(); auto *out_quant_arg = new QuantArg(); out_quant_arg->zeroPoint = 31, out_quant_arg->scale = 0.3439215686275; diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/fullconnection_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/fullconnection_int8_tests.cc index 67750d4d661..648ddf37807 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/fullconnection_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/fullconnection_int8_tests.cc @@ -42,7 +42,7 @@ extern void QuantProcess(float *input, int len, float min, float max, float *sca extern lite::Tensor *MakeQuantTensor(int8_t *data, int len, std::vector *shape, float scale, int zp); lite::Tensor *MakeIntTensor(int *data, int len, std::vector *shape) { - auto tensor = new lite::Tensor(kNumberTypeInt32, *shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + auto tensor = new lite::Tensor(kNumberTypeInt32, *shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); tensor->MallocData(); auto tensor_ptr = reinterpret_cast(tensor->MutableData()); memcpy(tensor_ptr, data, len * sizeof(int)); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/matmul_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/matmul_int8_tests.cc index db72d08023f..a6797440186 100644 --- 
a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/matmul_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/matmul_int8_tests.cc @@ -47,7 +47,7 @@ void QuantProcess(float *input, int len, float min, float max, float *scale, int } lite::Tensor *MakeQuantTensor(int8_t *data, int len, std::vector *shape, float scale, int zp) { - auto tensor = new lite::Tensor(kNumberTypeInt8, *shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR); + auto tensor = new lite::Tensor(kNumberTypeInt8, *shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR); tensor->MallocData(); if (data) { auto tensor_ptr = reinterpret_cast(tensor->MutableData()); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/pad_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/pad_int8_tests.cc index b209038119f..a58cd710b90 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/pad_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/pad_int8_tests.cc @@ -33,7 +33,7 @@ class TestPadInt8 : public mindspore::CommonTest { int PadInt8TestInit1(std::vector *inputs_, std::vector *outputs_, PadParameter *pad_param, int8_t **correct) { - Tensor *in_t = new Tensor(kNumberTypeInt8, {3}, schema::Format_NHWC, lite::Tensor::CONST_TENSOR); + Tensor *in_t = new Tensor(kNumberTypeInt8, {3}, mindspore::NHWC, lite::Tensor::CONST_TENSOR); in_t->MallocData(); int8_t in[] = {1, 1, 1}; memcpy(in_t->MutableData(), in, sizeof(int8_t) * in_t->ElementsNum()); @@ -42,7 +42,7 @@ int PadInt8TestInit1(std::vector *inputs_, std::vector *outp in_t->AddQuantParam(*in_quant_arg); inputs_->push_back(in_t); - Tensor *out_t = new Tensor(kNumberTypeInt8, {7}, schema::Format_NHWC, lite::Tensor::CONST_TENSOR); + Tensor *out_t = new Tensor(kNumberTypeInt8, {7}, mindspore::NHWC, lite::Tensor::CONST_TENSOR); out_t->MallocData(); QuantArg *out_quant_arg = new QuantArg(); out_quant_arg->zeroPoint = 10, out_quant_arg->scale = 0.31228156; @@ -84,7 +84,7 @@ 
TEST_F(TestPadInt8, PadInt8Test1) { int PadInt8TestInit2(std::vector *inputs_, std::vector *outputs_, PadParameter *pad_param, int8_t **correct) { - Tensor *in_t = new Tensor(kNumberTypeInt8, {6, 2}, schema::Format_NHWC, lite::Tensor::VAR); + Tensor *in_t = new Tensor(kNumberTypeInt8, {6, 2}, mindspore::NHWC, lite::Tensor::VAR); in_t->MallocData(); int8_t in[] = {18, 71, 99, -6, 5, -119, 86, 13, 15, -85, -41, -77}; memcpy(in_t->MutableData(), in, sizeof(int8_t) * in_t->ElementsNum()); @@ -93,7 +93,7 @@ int PadInt8TestInit2(std::vector *inputs_, std::vector *outp in_t->AddQuantParam(*in_quant_arg); inputs_->push_back(in_t); - Tensor *out_t = new Tensor(kNumberTypeInt8, {10, 5}, schema::Format_NHWC, lite::Tensor::VAR); + Tensor *out_t = new Tensor(kNumberTypeInt8, {10, 5}, mindspore::NHWC, lite::Tensor::VAR); out_t->MallocData(); QuantArg *out_quant_arg = new QuantArg(); out_quant_arg->zeroPoint = 10, out_quant_arg->scale = 0.31228156; @@ -137,7 +137,7 @@ TEST_F(TestPadInt8, PadInt8Test2) { int PadInt8TestInit4(std::vector *inputs_, std::vector *outputs_, PadParameter *pad_param, int8_t **correct) { - Tensor *in_t = new Tensor(kNumberTypeInt8, {2, 3, 2, 1}, schema::Format_NHWC, lite::Tensor::VAR); + Tensor *in_t = new Tensor(kNumberTypeInt8, {2, 3, 2, 1}, mindspore::NHWC, lite::Tensor::VAR); in_t->MallocData(); int8_t in[] = {73, 24, 7, -31, -109, -2, 69, -64, 51, -45, 38, 53}; memcpy(in_t->MutableData(), in, sizeof(int8_t) * in_t->ElementsNum()); @@ -146,7 +146,7 @@ int PadInt8TestInit4(std::vector *inputs_, std::vector *outp in_t->AddQuantParam(*in_quant_arg); inputs_->push_back(in_t); - Tensor *out_t = new Tensor(kNumberTypeInt8, {6, 6, 4, 3}, schema::Format_NHWC, lite::Tensor::VAR); + Tensor *out_t = new Tensor(kNumberTypeInt8, {6, 6, 4, 3}, mindspore::NHWC, lite::Tensor::VAR); out_t->MallocData(); QuantArg *out_quant_arg = new QuantArg(); out_quant_arg->zeroPoint = 10, out_quant_arg->scale = 0.31228156; diff --git 
a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/quant_dtype_cast_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/quant_dtype_cast_tests.cc index f76c54dd052..f9fb81d39f0 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/quant_dtype_cast_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/quant_dtype_cast_tests.cc @@ -43,7 +43,7 @@ TEST_F(QuantDTypeCastTestFp32, QuantDTypeCastTest1) { input_tensor.set_data(input.data()); input_tensor.set_shape(in_shape); input_tensor.set_data_type(kNumberTypeInt8); - input_tensor.set_format(schema::Format_NHWC); + input_tensor.set_format(mindspore::NHWC); input_tensor.AddQuantParam(quant_arg); std::vector inputs_tensor; @@ -106,7 +106,7 @@ TEST_F(QuantDTypeCastTestFp32, QuantDTypeCastTest2) { lite::Tensor output_tensor; output_tensor.set_data(output.data()); output_tensor.set_shape(out_shape); - output_tensor.set_format(schema::Format_NHWC); + output_tensor.set_format(mindspore::NHWC); output_tensor.set_data_type(kNumberTypeInt8); std::vector outputs_tensor; outputs_tensor.emplace_back(&output_tensor); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/string/normalize.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/string/normalize.cc index 040656f4381..1c86a856853 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/string/normalize.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/string/normalize.cc @@ -47,7 +47,7 @@ class TestNormalize : public mindspore::CommonTest { void TestNormalize::NormalizeTestInit() { input_tensor_.set_data_type(kObjectTypeString); - input_tensor_.set_format(schema::Format_NHWC); + input_tensor_.set_format(mindspore::NHWC); std::vector str_pack; const char sentence1[] = " I don't know what happened\n"; @@ -57,7 +57,7 @@ void TestNormalize::NormalizeTestInit() { mindspore::lite::WriteStringsToTensor(&input_tensor_, str_pack); output_tensor_.set_data_type(kObjectTypeString); - output_tensor_.set_format(schema::Format_NHWC); + 
output_tensor_.set_format(mindspore::NHWC); } TEST_F(TestNormalize, TestSentence) { diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/cast_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/cast_tests.cc index dd3b0987c2d..f76b58ad26e 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/cast_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/cast_tests.cc @@ -63,8 +63,8 @@ TEST_F(TestCastSelfOpenCL, Castfp32tofp16) { MS_LOG(INFO) << " init tensors "; std::vector shape = {1, 23, 39, 47}; auto tensor_type = lite::Tensor::CONST_TENSOR; - auto *input_tensor = new (std::nothrow) lite::Tensor(kNumberTypeFloat32, shape, schema::Format_NHWC, tensor_type); - auto *output_tensor = new (std::nothrow) lite::Tensor(kNumberTypeFloat16, shape, schema::Format_NHWC, tensor_type); + auto *input_tensor = new (std::nothrow) lite::Tensor(kNumberTypeFloat32, shape, mindspore::NHWC, tensor_type); + auto *output_tensor = new (std::nothrow) lite::Tensor(kNumberTypeFloat16, shape, mindspore::NHWC, tensor_type); if (input_tensor == nullptr || output_tensor == nullptr) { MS_LOG(INFO) << " new input_tensor or output_tensor failed "; return; @@ -175,8 +175,8 @@ TEST_F(TestCastSelfOpenCL, Castfp16tofp32) { MS_LOG(INFO) << " init tensors "; std::vector shape = {1, 23, 39, 47}; auto tensor_type = lite::Tensor::CONST_TENSOR; - auto *input_tensor = new (std::nothrow) lite::Tensor(kNumberTypeFloat16, shape, schema::Format_NHWC, tensor_type); - auto *output_tensor = new (std::nothrow) lite::Tensor(kNumberTypeFloat32, shape, schema::Format_NHWC, tensor_type); + auto *input_tensor = new (std::nothrow) lite::Tensor(kNumberTypeFloat16, shape, mindspore::NHWC, tensor_type); + auto *output_tensor = new (std::nothrow) lite::Tensor(kNumberTypeFloat32, shape, mindspore::NHWC, tensor_type); if (input_tensor == nullptr || output_tensor == nullptr) { MS_LOG(INFO) << " new input_tensor or output_tensor failed "; return; diff --git 
a/mindspore/lite/test/ut/src/runtime/kernel/opencl/common.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/common.cc index 3b4c545d9ae..f6cdbd15381 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/common.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/common.cc @@ -24,7 +24,6 @@ using mindspore::kernel::LiteKernel; using mindspore::kernel::OpenCLSubGraph; using mindspore::lite::KernelRegistry; -using mindspore::schema::Format::Format_NHWC; namespace mindspore::lite::opencl::test { // muti-output @@ -67,11 +66,11 @@ void TestMain(const std::vector &input_infos, const std::vec auto &shape = std::get<0>(input_info); auto category = std::get<2>(input_info); auto data_type = std::get<3>(input_info); - in_tensors.emplace_back(std::make_shared(data_type, shape, Format_NHWC, category)); + in_tensors.emplace_back(std::make_shared(data_type, shape, mindspore::NHWC, category)); } for (auto outout_info : output_info) { const std::vector &output_shape = std::get<0>(outout_info); - out_tensors.emplace_back(std::make_shared(std::get<2>(outout_info), output_shape, Format_NHWC, VAR)); + out_tensors.emplace_back(std::make_shared(std::get<2>(outout_info), output_shape, mindspore::NHWC, VAR)); } // secondly, init weight Tensor's data std::vector kernel_inputs; @@ -237,7 +236,7 @@ void TestMain(const std::vector &input_infos, std::tuple(input_info); auto category = std::get<2>(input_info); auto data_type = std::get<3>(input_info); - tensors.emplace_back(std::make_shared(data_type, shape, Format_NHWC, category)); + tensors.emplace_back(std::make_shared(data_type, shape, mindspore::NHWC, category)); } // secondly, init weight Tensor's data std::vector kernel_inputs; @@ -263,7 +262,7 @@ void TestMain(const std::vector &input_infos, std::tuple &output_shape = std::get<0>(output_info); float *expect_data = std::get<1>(output_info); - auto output = Tensor(kNumberTypeFloat32, output_shape, Format_NHWC, VAR); + auto output = Tensor(kNumberTypeFloat32, output_shape, 
mindspore::NHWC, VAR); // simulating benchmark: session_->CompileGraph() -> scheduler.Schedule() -> BuildKernels() MS_LOG(DEBUG) << "create OpenCLKernel"; diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/fill_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/fill_tests.cc index d660ffcd0ee..10a153bb594 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/fill_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/fill_tests.cc @@ -22,7 +22,6 @@ using mindspore::lite::Tensor; using mindspore::schema::PrimitiveType_Fill; using mindspore::schema::PrimitiveType_Shape; -using mindspore::schema::Format::Format_NHWC; // PrimitiveType_Fill: src/ops/populate/fill_populate.cc @@ -45,8 +44,8 @@ TEST_F(TestFillOpenCLCI, Fp32testfill) { float correctOutput[] = {9, 9, 9, 9, 9, 9, 9, 9, 9}; auto data_type = kNumberTypeFloat32; std::vector output_shape = {3, 3}; - auto in_tensor1 = Tensor(data_type, input_shape1, Format_NHWC, lite::Tensor::VAR); - auto output_tensor = Tensor(data_type, output_shape, Format_NHWC, lite::Tensor::VAR); + auto in_tensor1 = Tensor(data_type, input_shape1, mindspore::NHWC, lite::Tensor::VAR); + auto output_tensor = Tensor(data_type, output_shape, mindspore::NHWC, lite::Tensor::VAR); std::vector inputs{&in_tensor1}; std::vector outputs{&output_tensor}; @@ -116,8 +115,8 @@ TEST_F(TestFillOpenCLCI, Fp32testshape) { float correctOutput[] = {2, 4}; auto data_type = kNumberTypeFloat32; std::vector output_shape = {2}; - auto in_tensor1 = Tensor(data_type, input_shape1, Format_NHWC, lite::Tensor::VAR); - auto output_tensor = Tensor(data_type, output_shape, Format_NHWC, lite::Tensor::VAR); + auto in_tensor1 = Tensor(data_type, input_shape1, mindspore::NHWC, lite::Tensor::VAR); + auto output_tensor = Tensor(data_type, output_shape, mindspore::NHWC, lite::Tensor::VAR); std::vector inputs{&in_tensor1}; std::vector outputs{&output_tensor}; diff --git a/mindspore/lite/tools/benchmark/benchmark.cc 
b/mindspore/lite/tools/benchmark/benchmark.cc index e350b67a8ea..a42361ae42e 100644 --- a/mindspore/lite/tools/benchmark/benchmark.cc +++ b/mindspore/lite/tools/benchmark/benchmark.cc @@ -932,7 +932,7 @@ std::string GenerateOutputFileName(tensor::MSTensor *tensor, const std::string & if (TYPE_ID_MAP.find(tensor->data_type()) != TYPE_ID_MAP.end()) { file_name += TYPE_ID_MAP.at(tensor->data_type()); } - auto tensor_format = static_cast(tensor)->format(); + auto tensor_format = static_cast(static_cast(tensor)->format()); if (TENSOR_FORMAT_MAP.find(tensor_format) != TENSOR_FORMAT_MAP.end()) { file_name += "_" + TENSOR_FORMAT_MAP.at(tensor_format) + ".bin"; } diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/infershape_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/infershape_pass.cc index 5f870a075f5..b5d6d1acdef 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/infershape_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/infershape_pass.cc @@ -89,7 +89,7 @@ void ConvertString(MetaGraphT *graph, uint32_t index, bool *convert_succ, std::v auto &tensorT = graph->allTensors.at(index); auto tensor_shape = tensorT->dims; lite_tensor = std::make_unique( - TypeId(tensorT->dataType), tensor_shape, tensorT->format, + TypeId(tensorT->dataType), tensor_shape, static_cast(tensorT->format), TensorCategory(tensorT->nodeType, tensorT->dims.size(), TypeId(tensorT->dataType), tensorT->data.size())); if (lite_tensor == nullptr) { MS_LOG(ERROR) << "lite tensor is nullptr"; @@ -117,7 +117,7 @@ void ConvertOtherTensor(MetaGraphT *graph, uint32_t index, bool *convert_succ, s auto &tensorT = graph->allTensors.at(index); auto tensor_shape = tensorT->dims; lite_tensor = std::make_unique( - TypeId(tensorT->dataType), tensor_shape, tensorT->format, + TypeId(tensorT->dataType), tensor_shape, static_cast(tensorT->format), TensorCategory(tensorT->nodeType, tensorT->dims.size(), TypeId(tensorT->dataType), tensorT->data.size())); if 
(lite_tensor == nullptr) { MS_LOG(ERROR) << "lite tensor is nullptr"; @@ -227,7 +227,7 @@ void SetDataType(MetaGraphT *graph, const std::vector &output_tensors, uint32_t i, uint32_t infer_node_index) { auto &node = graph->nodes.at(infer_node_index); auto &output_tensor = graph->allTensors.at(node->outputIndex[i]); - output_tensor->format = output_tensors[i]->format(); + output_tensor->format = static_cast(output_tensors[i]->format()); output_tensor->dataType = output_tensors[i]->data_type(); if (output_tensors[i]->data_type() == kObjectTypeTensorType) { auto tensor_list = reinterpret_cast(output_tensors[i]); diff --git a/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc b/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc index 8e2af588483..87c39b210db 100644 --- a/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc +++ b/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc @@ -85,7 +85,7 @@ std::vector GetCNodeInputTensors(const CNodePtr &cnode, lite::converte return {}; } auto tensor = new (std::nothrow) - Tensor(TypeId(data_info.data_type_), data_info.shape_, schema::Format(data_info.format_), + Tensor(TypeId(data_info.data_type_), data_info.shape_, static_cast(data_info.format_), lite::TensorCategory(0, data_info.shape_.size(), TypeId(data_info.data_type_), data_info.data_.size())); if (tensor == nullptr) { MS_LOG(ERROR) << "new a tensor is nullptr."; diff --git a/mindspore/lite/tools/optimizer/graph/infershape_pass.cc b/mindspore/lite/tools/optimizer/graph/infershape_pass.cc index aa128b65735..031432e16cf 100644 --- a/mindspore/lite/tools/optimizer/graph/infershape_pass.cc +++ b/mindspore/lite/tools/optimizer/graph/infershape_pass.cc @@ -201,9 +201,9 @@ STATUS InferShapePass::GetCNodeInputTensors(const CNodePtr &cnode, std::vectorset_shape(shape); tensor->set_data_type(tensor_info->data_type()); if (primitive->GetAttr(ops::kFormat) != nullptr && i == WEIGHT_INDEX) { - 
tensor->set_format(static_cast(GetValue(primitive->GetAttr(ops::kFormat)))); + tensor->set_format(static_cast(GetValue(primitive->GetAttr(ops::kFormat)))); } else { - tensor->set_format(schema::Format::Format_NHWC); + tensor->set_format(mindspore::NHWC); } } diff --git a/mindspore/lite/tools/optimizer/graph/node_infershape.cc b/mindspore/lite/tools/optimizer/graph/node_infershape.cc index 1e2812a96ee..decbdbc54d6 100644 --- a/mindspore/lite/tools/optimizer/graph/node_infershape.cc +++ b/mindspore/lite/tools/optimizer/graph/node_infershape.cc @@ -51,7 +51,7 @@ void SetConvWeightFormat(const CNodePtr &cnode, const std::vector(cnode->input(0)); MS_ASSERT(prim != nullptr); if (prim->GetAttr(ops::kFormat) != nullptr && inputs.size() > 1) { - inputs[1]->set_format(static_cast(GetValue(prim->GetAttr(ops::kFormat)))); + inputs[1]->set_format(static_cast(GetValue(prim->GetAttr(ops::kFormat)))); } } @@ -63,7 +63,7 @@ void RectifyFormat(const CNodePtr &cnode, const std::vector &inp for (auto &input : inputs) { auto shape = input->shape(); if (shape.size() == 4 && shape[3] == 3 && shape[1] == -1) { - input->set_format(schema::Format_NHWC); + input->set_format(mindspore::NHWC); } } } @@ -435,7 +435,7 @@ STATUS NodeInferShape::ConvertToLiteTensor(const std::vector &da lite::Tensor *tensor = nullptr; if (data_info.data_type_ != kObjectTypeTensorType) { tensor = new (std::nothrow) lite::Tensor(TypeId(data_info.data_type_), data_info.shape_, - (schema::Format)data_info.format_, tensor_category); + (mindspore::Format)data_info.format_, tensor_category); } else { tensor = new (std::nothrow) lite::TensorList(data_info.shape_, std::vector(), tensor_category); }