add set_format and get_format interface to mstensor

This commit is contained in:
xuanyue 2021-06-07 15:42:49 +08:00
parent 4932854776
commit 46a7d6c74b
80 changed files with 393 additions and 318 deletions

View File

@ -174,6 +174,8 @@ if(PLATFORM_ARM64)
endif()
install(FILES ${TOP_DIR}/mindspore/core/ir/dtype/type_id.h DESTINATION ${RUNTIME_INC_DIR}/ir/dtype
COMPONENT ${RUNTIME_COMPONENT_NAME})
install(FILES ${TOP_DIR}/mindspore/core/ir/format.h DESTINATION ${RUNTIME_INC_DIR}/ir
COMPONENT ${RUNTIME_COMPONENT_NAME})
install(DIRECTORY ${TOP_DIR}/include/api/ DESTINATION ${RUNTIME_INC_DIR}/api
COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "ops*" EXCLUDE)
file(GLOB NNACL_FILES GLOB ${NNACL_DIR}/*.h)
@ -231,6 +233,8 @@ elseif(PLATFORM_ARM32)
endif()
install(FILES ${TOP_DIR}/mindspore/core/ir/dtype/type_id.h DESTINATION ${RUNTIME_INC_DIR}/ir/dtype
COMPONENT ${RUNTIME_COMPONENT_NAME})
install(FILES ${TOP_DIR}/mindspore/core/ir/format.h DESTINATION ${RUNTIME_INC_DIR}/ir
COMPONENT ${RUNTIME_COMPONENT_NAME})
install(DIRECTORY ${TOP_DIR}/include/api/ DESTINATION ${RUNTIME_INC_DIR}/api
COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "ops*" EXCLUDE)
file(GLOB NNACL_FILES GLOB ${NNACL_DIR}/*.h)
@ -310,6 +314,8 @@ elseif(WIN32)
endif()
install(FILES ${TOP_DIR}/mindspore/core/ir/dtype/type_id.h DESTINATION ${RUNTIME_INC_DIR}/ir/dtype
COMPONENT ${RUNTIME_COMPONENT_NAME})
install(FILES ${TOP_DIR}/mindspore/core/ir/format.h DESTINATION ${RUNTIME_INC_DIR}/ir
COMPONENT ${RUNTIME_COMPONENT_NAME})
install(DIRECTORY ${TOP_DIR}/include/api/ DESTINATION ${RUNTIME_INC_DIR}/api
COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "ops*" EXCLUDE)
install(FILES ${TOP_DIR}/build/mindspore/src/${MINDSPORE_LITE_LIB_NAME}.a DESTINATION ${RUNTIME_LIB_DIR}
@ -333,6 +339,8 @@ else()
endif()
install(FILES ${TOP_DIR}/mindspore/core/ir/dtype/type_id.h DESTINATION ${RUNTIME_INC_DIR}/ir/dtype
COMPONENT ${RUNTIME_COMPONENT_NAME})
install(FILES ${TOP_DIR}/mindspore/core/ir/format.h DESTINATION ${RUNTIME_INC_DIR}/ir
COMPONENT ${RUNTIME_COMPONENT_NAME})
install(DIRECTORY ${TOP_DIR}/include/api/ DESTINATION ${RUNTIME_INC_DIR}/api
COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "ops*" EXCLUDE)
install(FILES ${TOP_DIR}/mindspore/lite/build/src/${MINDSPORE_LITE_LIB_NAME}.so DESTINATION ${RUNTIME_LIB_DIR}

View File

@ -21,6 +21,7 @@
#include "ir/dtype/type_id.h"
namespace mindspore {
enum Format : int64_t;
namespace tensor {
/// \brief MSTensor defined tensor in MindSpore Lite.
class MS_API MSTensor {
@ -58,8 +59,20 @@ class MS_API MSTensor {
virtual TypeId data_type() const = 0;
/// \brief Set data type of current MSTensor.
///
/// \param[in] data_type Define data type, which is shown in type_id.h.
virtual void set_data_type(TypeId data_type) = 0;
/// \brief Set format of current MSTensor.
///
/// \param[in] format Define format of data, which is shown in format.h
virtual void set_format(mindspore::Format format) = 0;
/// \brief Get format of current MSTensor.
///
/// \return format, which is shown in format.h
virtual mindspore::Format format() const = 0;
/// \brief Get shape of the MindSpore Lite MSTensor.
///
/// \return A vector of int as the shape of the MindSpore Lite MSTensor.

View File

@ -32,7 +32,7 @@ void *MemoryAllocator::MallocWeightTensor(TypeId type_id, size_t size, MallocTyp
size_t type_size = item->second;
std::vector<int> shape = {1, static_cast<int>(size / type_size)};
auto cate = type == kOfflinePackWeight ? Tensor::Category::CONST_TENSOR : Tensor::Category::VAR;
Tensor *weight = new (std::nothrow) lite::Tensor(type_id, shape, schema::Format_NHWC, cate);
Tensor *weight = new (std::nothrow) lite::Tensor(type_id, shape, mindspore::NHWC, cate);
MS_CHECK_PTR_RET_NULL(weight);
std::string runtime_addr = kWeightPrefixName + std::to_string(weight_index_++);
malloc_weights_addr_.insert(std::make_pair(weight, runtime_addr));

View File

@ -38,6 +38,7 @@ const char tensor_header[] = R"RAW(
#define MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_TENSOR_H_
#include "include/ms_tensor.h"
#include "include/ir/format.h"
namespace mindspore {
namespace lite {
@ -64,6 +65,8 @@ class MTensor : public mindspore::tensor::MSTensor {
AllocatorPtr allocator() const override { return nullptr; }
TypeId data_type() const override { return data_type_; }
void set_data_type(TypeId data_type) override { data_type_ = data_type; }
void set_format(mindspore::Format format) override {}
mindspore::Format format() const override { return mindspore::NHWC; }
Vector<int> shape() const override { return shape_; }
void set_shape(const Vector<int> &shape) override { shape_ = shape; }
int ElementsNum() const override;

View File

@ -78,7 +78,8 @@ int CoderGraph::ConvertTensors() {
}
int origin_data_type = static_cast<int>(origin_tensor->dataType());
Tensor *dstTensor = new (std::nothrow)
lite::Tensor(TypeId(origin_data_type), shape, origin_tensor->format(), TensorCategory(origin_tensor));
lite::Tensor(TypeId(origin_data_type), shape, static_cast<mindspore::Format>(origin_tensor->format()),
TensorCategory(origin_tensor));
MS_CHECK_PTR(dstTensor);
if (origin_tensor->nodeType() == NodeType_ValueNode && origin_tensor->data() != nullptr &&
origin_tensor->data()->size() > 0) {

View File

@ -95,37 +95,38 @@ int Conv2DBaseCoder::MallocConvQuantParams(size_t input_arg_num, size_t filter_a
return RET_OK;
}
std::string Conv2DBaseCoder::LayoutTransformFp32(schema::Format src_format, schema::Format dst_format) {
std::string Conv2DBaseCoder::LayoutTransformFp32(mindspore::Format src_format, mindspore::Format dst_format) {
std::string ret;
if (src_format == schema::Format_NHWC && dst_format == schema::Format_NC4HW4) {
if (src_format == mindspore::NHWC && dst_format == mindspore::NC4HW4) {
ret = "PackNHWCToNC4HW4Fp32";
} else if (src_format == schema::Format_NHWC && dst_format == schema::Format_NHWC4) {
} else if (src_format == mindspore::NHWC && dst_format == mindspore::NHWC4) {
ret = "PackNHWCToNHWC4Fp32";
} else if (src_format == schema::Format_NC4HW4 && dst_format == schema::Format_NHWC4) {
} else if (src_format == mindspore::NC4HW4 && dst_format == mindspore::NHWC4) {
ret = "PackNC4HW4ToNHWC4Fp32";
} else if (src_format == schema::Format_NCHW && dst_format == schema::Format_NC4HW4) {
} else if (src_format == mindspore::NCHW && dst_format == mindspore::NC4HW4) {
ret = "PackNCHWToNC4HW4Fp32";
} else if (src_format == schema::Format_NC4HW4 && dst_format == schema::Format_NHWC) {
} else if (src_format == mindspore::NC4HW4 && dst_format == mindspore::NHWC) {
ret = "PackNC4HW4ToNHWCFp32";
} else {
MS_LOG(ERROR) << "Unsupported transform from " << schema::EnumNameFormat(src_format) << " to "
<< schema::EnumNameFormat(dst_format);
MS_LOG(ERROR) << "Unsupported transform from " << schema::EnumNameFormat(static_cast<schema::Format>(src_format))
<< " to " << schema::EnumNameFormat(static_cast<schema::Format>(dst_format));
}
return ret;
}
std::string Conv2DBaseCoder::LayoutTransformInt8(schema::Format src_format, schema::Format dst_format) {
std::string Conv2DBaseCoder::LayoutTransformInt8(mindspore::Format src_format, mindspore::Format dst_format) {
std::string ret;
if (src_format == schema::Format_NHWC && dst_format == schema::Format_NHWC4) {
if (src_format == mindspore::NHWC && dst_format == mindspore::NHWC4) {
ret = "PackNHWCToNHWC4Int8";
} else {
MS_LOG(ERROR) << "Unsupported transform from " << schema::EnumNameFormat(src_format) << " to "
<< schema::EnumNameFormat(dst_format);
MS_LOG(ERROR) << "Unsupported transform from " << schema::EnumNameFormat(static_cast<schema::Format>(src_format))
<< " to " << schema::EnumNameFormat(static_cast<schema::Format>(dst_format));
}
return ret;
}
std::string Conv2DBaseCoder::LayoutTransform(TypeId data_type, schema::Format src_format, schema::Format dst_format) {
std::string Conv2DBaseCoder::LayoutTransform(TypeId data_type, mindspore::Format src_format,
mindspore::Format dst_format) {
std::string ret;
switch (data_type) {
case kNumberTypeInt8:
@ -352,8 +353,8 @@ int Conv2DBaseCoder::Init() {
int Conv2DBaseCoder::CheckLayout(lite::Tensor *input_tensor) {
mindspore::TypeId data_type = input_tensor->data_type();
schema::Format input_format = input_tensor->format();
schema::Format execute_format = schema::Format_NHWC4;
mindspore::Format input_format = input_tensor->format();
mindspore::Format execute_format = mindspore::NHWC4;
convert_func_ = LayoutTransform(data_type, input_format, execute_format);
MS_CHECK_TRUE(!convert_func_.empty(), "layout convert func is nullptr.");
return RET_OK;

View File

@ -57,11 +57,11 @@ class Conv2DBaseCoder : public OperatorCoder {
int CheckLayout(lite::Tensor *input_tensor);
std::string LayoutTransformFp32(schema::Format src_format, schema::Format dst_format);
std::string LayoutTransformFp32(mindspore::Format src_format, mindspore::Format dst_format);
std::string LayoutTransformInt8(schema::Format src_format, schema::Format dst_format);
std::string LayoutTransformInt8(mindspore::Format src_format, mindspore::Format dst_format);
std::string LayoutTransform(TypeId data_type, schema::Format src_format, schema::Format dst_format);
std::string LayoutTransform(TypeId data_type, mindspore::Format src_format, mindspore::Format dst_format);
private:
int MallocConvQuantParams(size_t input_arg_num, size_t filter_arg_num, size_t output_arg_num);

View File

@ -29,7 +29,7 @@ int FullConnectionInt8Coder::Prepare(CoderContext *const context) {
return RET_OK;
}
void FullConnectionInt8Coder::ConfigInputOutput() { output_tensor_->set_format(schema::Format_NHWC); }
void FullConnectionInt8Coder::ConfigInputOutput() { output_tensor_->set_format(mindspore::NHWC); }
int FullConnectionInt8Coder::DoCode(CoderContext *const context) {
Serializer code;

View File

@ -109,7 +109,7 @@ int Conv2D3x3Int8Coder::InitTmpBuffer(CoderContext *const context) {
return RET_OK;
}
void Conv2D3x3Int8Coder::ConfigInputOutput() { output_tensor_->set_format(schema::Format_NHWC); }
void Conv2D3x3Int8Coder::ConfigInputOutput() { output_tensor_->set_format(mindspore::NHWC); }
int Conv2D3x3Int8Coder::Prepare(CoderContext *const context) {
MS_CHECK_RET_CODE(Conv2DBaseCoder::Init(), "ConvolutionBase init failed.");

View File

@ -75,24 +75,24 @@ std::string GetTensorDataType(TypeId type) {
}
}
std::string EnumMicroTensorFormat(schema::Format format) {
std::string EnumMicroTensorFormat(mindspore::Format format) {
switch (format) {
case schema::Format_NHWC:
case mindspore::NHWC:
return "Format_NHWC";
case schema::Format_NCHW:
case mindspore::NCHW:
return "Format_NCHW";
case schema::Format_HWKC:
case mindspore::HWKC:
return "Format_HWKC";
case schema::Format_HWCK:
case mindspore::HWCK:
return "Format_HWCK";
case schema::Format_KCHW:
case mindspore::KCHW:
return "Format_KCHW";
case schema::Format_CKHW:
case mindspore::CKHW:
return "Format_CKHW";
case schema::Format_NC4HW4:
case mindspore::NC4HW4:
return "Format_NC4HW4";
default:
MS_LOG(ERROR) << "unsupported format: " << schema::EnumNameFormat(format);
MS_LOG(ERROR) << "unsupported format: " << schema::EnumNameFormat(static_cast<schema::Format>(format));
return "Format_NUM_OF_FORMAT";
}
}

View File

@ -33,7 +33,7 @@ std::string EnumNameDataType(TypeId type);
std::string GetTensorDataType(TypeId type);
std::string EnumMicroTensorFormat(schema::Format format);
std::string EnumMicroTensorFormat(mindspore::Format format);
std::string EnumMicroTensorDataType(TypeId type);

View File

@ -0,0 +1,46 @@
/**
 * This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
 *
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_CORE_IR_FORMAT_H_
#define MINDSPORE_CORE_IR_FORMAT_H_
#include <cstdint>
namespace mindspore {
// Tensor data-layout formats exposed through MSTensor::format()/set_format().
// Letter legend (by convention in this codebase): N = batch, C = channel,
// H = height, W = width; K appears in weight layouts (presumably kernel/output
// channel count — TODO confirm against the pack routines in nnacl/pack.h).
// The "4" variants (NHWC4, NC4HW4, HW4, NC4) denote channel-packed layouts
// used by the Pack*To* conversion kernels.
//
// The underlying type is int64_t so this enum stays assignment-compatible with
// the int64-typed format attribute used elsewhere (see the forward declaration
// `enum Format : int64_t;` in ms_tensor.h).
//
// WARNING: enumerator values are serialized (cast to/from schema::Format and
// TensorC::format_), so existing values must never be renumbered.
enum Format : int64_t {
  NCHW = 0,
  NHWC = 1,
  NHWC4 = 2,
  HWKC = 3,
  HWCK = 4,
  KCHW = 5,
  CKHW = 6,
  KHWC = 7,
  CHWK = 8,
  HW = 9,
  HW4 = 10,
  NC = 11,
  NC4 = 12,
  NC4HW4 = 13,
  // NOTE(review): NUM_OF_FORMAT is NOT a count of all formats — NCDHW, NWC and
  // NCW below were appended after it (values 15-17) and are excluded. Any code
  // using NUM_OF_FORMAT as an array size or validity bound will mishandle the
  // three trailing formats — verify all uses before relying on it.
  NUM_OF_FORMAT = 14,
  NCDHW = 15,
  NWC = 16,
  NCW = 17
};
}  // namespace mindspore
#endif  // MINDSPORE_CORE_IR_FORMAT_H_

View File

@ -21,6 +21,7 @@
#include "ir/dtype/type_id.h"
namespace mindspore {
enum Format : int64_t;
namespace tensor {
/// \brief MSTensor defined tensor in MindSpore Lite.
class MS_API MSTensor {
@ -53,8 +54,20 @@ class MS_API MSTensor {
virtual TypeId data_type() const = 0;
/// \brief Set data type of current MSTensor.
///
/// \param[in] data_type Define data type, which is shown in type_id.h.
virtual void set_data_type(TypeId data_type) = 0;
/// \brief Set format of current MSTensor.
///
/// \param[in] format Define format of data, which is shown in format.h
virtual void set_format(mindspore::Format format) = 0;
/// \brief Get format of current MSTensor.
///
/// \return format, which is shown in format.h
virtual mindspore::Format format() const = 0;
/// \brief Get shape of the MindSpore Lite MSTensor.
///
/// \return A vector of int as the shape of the MindSpore Lite MSTensor.

View File

@ -19,6 +19,7 @@
#define MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_TENSOR_H_
#include "include/ms_tensor.h"
#include "ir/format.h"
namespace mindspore {
namespace lite {
@ -45,6 +46,8 @@ class MTensor : public mindspore::tensor::MSTensor {
mindspore::Allocator *allocator() const override { return nullptr; }
TypeId data_type() const override { return data_type_; }
void set_data_type(TypeId data_type) override { data_type_ = data_type; }
void set_format(mindspore::Format format) override {}
mindspore::Format format() const override { return mindspore::NHWC; }
Vector<int> shape() const override { return shape_; }
void set_shape(const Vector<int> &shape) override { shape_ = shape; }
int ElementsNum() const override;

View File

@ -19,6 +19,7 @@
#define MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_TENSOR_H_
#include "include/ms_tensor.h"
#include "ir/format.h"
namespace mindspore {
namespace lite {
@ -45,6 +46,8 @@ class MTensor : public mindspore::tensor::MSTensor {
AllocatorPtr allocator() const override { return nullptr; }
TypeId data_type() const override { return data_type_; }
void set_data_type(TypeId data_type) override { data_type_ = data_type; }
void set_format(mindspore::Format format) override {}
mindspore::Format format() const override { return mindspore::NHWC; }
Vector<int> shape() const override { return shape_; }
void set_shape(const Vector<int> &shape) override { shape_ = shape; }
int ElementsNum() const override;

View File

@ -54,7 +54,7 @@ int OutputTensor2TensorC(const std::vector<lite::Tensor *> &tensors, std::vector
return RET_ERROR;
}
tensor_c->data_type_ = kNumberTypeFloat32;
tensor_c->format_ = schema::Format::Format_NCHW;
tensor_c->format_ = mindspore::NCHW;
tensor_c->data_ = nullptr;
tensor_c->shape_size_ = 0;
tensors_c->push_back(tensor_c);
@ -99,7 +99,7 @@ void Tensor2TensorC(Tensor *src, TensorC *dst) {
}
void TensorC2Tensor(TensorC *src, Tensor *dst) {
dst->set_format(static_cast<schema::Format>(src->format_));
dst->set_format(static_cast<mindspore::Format>(src->format_));
dst->set_data_type(static_cast<TypeId>(src->data_type_)); // get data during the runtime period
dst->set_shape(std::vector<int>(src->shape_, src->shape_ + src->shape_size_));
}
@ -131,7 +131,7 @@ int TensorList2TensorListC(TensorList *src, TensorListC *dst) {
int TensorListC2TensorList(TensorListC *src, TensorList *dst) {
dst->set_data_type(static_cast<TypeId>(src->data_type_));
dst->set_format(static_cast<schema::Format>(src->format_));
dst->set_format(static_cast<mindspore::Format>(src->format_));
dst->set_shape(std::vector<int>(1, src->element_num_));
dst->set_tensors_data_type(static_cast<TypeId>(src->tensors_data_type_));

View File

@ -25,6 +25,7 @@
#include "src/common/utils.h"
#include "src/common/prim_util.h"
#include "src/common/graph_util.h"
#include "src/common/tensor_util.h"
#include "src/kernel_registry.h"
#include "src/lite_model.h"
#include "src/weight_decoder.h"
@ -163,7 +164,8 @@ lite::Tensor *LiteSession::ConvertTensor(const schema::Tensor &src_tensor) {
tensor_list->set_tensors_data_type(tensor_data_type);
}
} else {
dst_tensor = new (std::nothrow) Tensor(TypeId(src_tensor.dataType()), shape, src_tensor.format(), src_category);
dst_tensor = new (std::nothrow)
Tensor(TypeId(src_tensor.dataType()), shape, static_cast<mindspore::Format>(src_tensor.format()), src_category);
}
return dst_tensor;
}
@ -591,7 +593,11 @@ int LiteSession::RunGraph(const KernelCallBack &before, const KernelCallBack &af
MS_LOG(ERROR) << "Not support multi-threading";
return RET_ERROR;
}
STATUS ret;
STATUS ret = CheckGraphInputFormat(inputs_);
if (ret != RET_OK) {
MS_LOG(ERROR) << "model input's format may be changed, which should be NHWC.";
return ret;
}
MS_ASSERT(this->context_);
if (before == nullptr && after == nullptr) {
ret = executor_->Run(this->inputs_, this->outputs_, this->kernels_, this->context_->allocator.get());

View File

@ -27,8 +27,8 @@ schema::Tensor *AttrToTensor(void *data, int data_size, bool is_array, TypeId ty
return nullptr;
}
auto dst_tensor =
(is_array ? new (std::nothrow) Tensor(type_id, {data_size}, schema::Format_NHWC, Tensor::Category::CONST_TENSOR)
: new (std::nothrow) Tensor(type_id, {}, schema::Format_NHWC, Tensor::Category::CONST_SCALAR));
(is_array ? new (std::nothrow) Tensor(type_id, {data_size}, mindspore::NHWC, Tensor::Category::CONST_TENSOR)
: new (std::nothrow) Tensor(type_id, {}, mindspore::NHWC, Tensor::Category::CONST_SCALAR));
auto dst_data = dst_tensor->MutableData();
if (dst_data == nullptr) {
MS_LOG(ERROR) << "Data from tensor is nullptr";

View File

@ -25,14 +25,14 @@ ge::Shape ConverterToNPUShape(const std::vector<int> &src_shape) {
return ge::Shape({shapes});
}
ge::Format ConverterToNPUFormat(schema::Format format) {
ge::Format ConverterToNPUFormat(mindspore::Format format) {
ge::Format ge_format;
switch (format) {
case schema::Format_NCHW:
case mindspore::NCHW:
ge_format = ge::FORMAT_NCHW;
break;
case schema::Format_NHWC:
case schema::Format_KHWC:
case mindspore::NHWC:
case mindspore::KHWC:
ge_format = ge::FORMAT_NHWC;
break;
default:

View File

@ -30,7 +30,7 @@ std::shared_ptr<ge::Tensor> ConverterToNPUTensor(Tensor *src);
hiai::op::Data *ConverterToNPUData(Tensor *src, const std::string &name);
ge::Format ConverterToNPUFormat(schema::Format format);
ge::Format ConverterToNPUFormat(mindspore::Format format);
ge::DataType ConverterToNPUDataType(TypeId type_id);

View File

@ -170,7 +170,7 @@ void UpdatePostTensors(kernel::LiteKernel *cur_kernel) {
return;
}
tensor->set_format(schema::Format_NCHW);
tensor->set_format(mindspore::NCHW);
auto nhwc_shape = tensor->shape();
tensor->set_shape({nhwc_shape[0], nhwc_shape[3], nhwc_shape[1], nhwc_shape[2]});
for (auto out_kernel : cur_kernel->out_kernels()) {

View File

@ -124,7 +124,7 @@ int NPUInsertTransformPass::InsertNode(kernel::LiteKernel *kernel, kernel::LiteK
std::vector<int> nchw_shape = {nhwc_shape[0], nhwc_shape[3], nhwc_shape[1], nhwc_shape[2]};
auto nh2nc_name = kernel_name + "_nh2nc_" + std::to_string(total++);
auto nh2nc_tensor = new (std::nothrow) Tensor(in_tensor->data_type(), nchw_shape, schema::Format_NCHW, Tensor::VAR);
auto nh2nc_tensor = new (std::nothrow) Tensor(in_tensor->data_type(), nchw_shape, mindspore::NCHW, Tensor::VAR);
if (nh2nc_tensor == nullptr) {
MS_LOG(ERROR) << "New nchw tensor failed when inserting nchw2nhwc kernel.";
return RET_ERROR;
@ -135,7 +135,7 @@ int NPUInsertTransformPass::InsertNode(kernel::LiteKernel *kernel, kernel::LiteK
all_tensors_->push_back(nh2nc_tensors[0]);
auto nc2nh_name = kernel_name + "_nc2nh_" + std::to_string(total++);
auto nc2nh_tensor = new (std::nothrow) Tensor(in_tensor->data_type(), nhwc_shape, schema::Format_NHWC, Tensor::VAR);
auto nc2nh_tensor = new (std::nothrow) Tensor(in_tensor->data_type(), nhwc_shape, mindspore::NHWC, Tensor::VAR);
if (nc2nh_tensor == nullptr) {
MS_LOG(ERROR) << "New nhwc tensor failed when inserting nhwc2nchw kernel.";
return RET_ERROR;
@ -151,7 +151,7 @@ int NPUInsertTransformPass::InsertNode(kernel::LiteKernel *kernel, kernel::LiteK
auto *nc2nh_kernel = NPUPassUtils::CreateNchw2NhwcKernel(nh2nc_tensors, nc2nh_tensors, context_, nc2nh_name);
trans_kernels->push_back(nc2nh_kernel);
auto nh2nc_perm_tensor = new Tensor(kNumberTypeInt32, {4}, schema::Format_NHWC, Tensor::CONST_TENSOR);
auto nh2nc_perm_tensor = new Tensor(kNumberTypeInt32, {4}, mindspore::NHWC, Tensor::CONST_TENSOR);
auto nh2nc_data = nh2nc_perm_tensor->MutableData();
if (nh2nc_data == nullptr) {
return RET_ERROR;
@ -160,7 +160,7 @@ int NPUInsertTransformPass::InsertNode(kernel::LiteKernel *kernel, kernel::LiteK
memcpy(nh2nc_data, nh2nc_perm_vector.data(), 4 * sizeof(int));
all_tensors_->push_back(nh2nc_perm_tensor);
auto nc2nh_perm_tensor = new Tensor(kNumberTypeInt32, {4}, schema::Format_NHWC, Tensor::CONST_TENSOR);
auto nc2nh_perm_tensor = new Tensor(kNumberTypeInt32, {4}, mindspore::NHWC, Tensor::CONST_TENSOR);
auto nc2nh_data = nc2nh_perm_tensor->MutableData();
if (nc2nh_data == nullptr) {
return RET_ERROR;

View File

@ -46,7 +46,7 @@ int NPUTransformPass::InsertPreNodes(kernel::LiteKernel *kernel, std::vector<ker
auto nhwc_shape = kernel->in_tensors()[0]->shape();
std::vector<int> nchw_shape = {nhwc_shape[0], nhwc_shape[3], nhwc_shape[1], nhwc_shape[2]};
auto tensor =
new (std::nothrow) Tensor(kernel->in_tensors()[0]->data_type(), nchw_shape, schema::Format_NCHW, Tensor::VAR);
new (std::nothrow) Tensor(kernel->in_tensors()[0]->data_type(), nchw_shape, mindspore::NCHW, Tensor::VAR);
if (tensor == nullptr) {
MS_LOG(ERROR) << "New nchw tensor failed when inserting pre nhwc2nchw kernel.";
return RET_ERROR;
@ -57,7 +57,7 @@ int NPUTransformPass::InsertPreNodes(kernel::LiteKernel *kernel, std::vector<ker
std::vector<Tensor *> pre_trans_out_tensors = {tensor};
all_tensors_->push_back(pre_trans_out_tensors[0]);
auto nh2nc_perm_tensor = new Tensor(kNumberTypeInt32, {4}, schema::Format_NHWC, Tensor::CONST_TENSOR);
auto nh2nc_perm_tensor = new Tensor(kNumberTypeInt32, {4}, mindspore::NHWC, Tensor::CONST_TENSOR);
auto nh2nc_data = nh2nc_perm_tensor->MutableData();
if (nh2nc_data == nullptr) {
return RET_ERROR;
@ -107,7 +107,7 @@ int NPUTransformPass::InsertPostNodes(kernel::LiteKernel *kernel, std::vector<ke
auto nhwc_shape = kernel->out_tensors()[0]->shape();
std::vector<int> nchw_shape = {nhwc_shape[0], nhwc_shape[3], nhwc_shape[1], nhwc_shape[2]};
auto nc2nh_tensor =
new (std::nothrow) Tensor(kernel->out_tensors()[0]->data_type(), nchw_shape, schema::Format_NCHW, Tensor::VAR);
new (std::nothrow) Tensor(kernel->out_tensors()[0]->data_type(), nchw_shape, mindspore::NCHW, Tensor::VAR);
if (nc2nh_tensor == nullptr) {
MS_LOG(ERROR) << "New nchw tensor failed when inserting post nchw2nhwc kernel.";
return RET_ERROR;
@ -119,7 +119,7 @@ int NPUTransformPass::InsertPostNodes(kernel::LiteKernel *kernel, std::vector<ke
if (is_output_kernel) {
// perm tensor
auto nc2nh_perm_tensor = new Tensor(kNumberTypeInt32, {4}, schema::Format_NHWC, Tensor::CONST_TENSOR);
auto nc2nh_perm_tensor = new Tensor(kNumberTypeInt32, {4}, mindspore::NHWC, Tensor::CONST_TENSOR);
auto nc2nh_data = nc2nh_perm_tensor->MutableData();
if (nc2nh_data == nullptr) {
return RET_ERROR;
@ -141,7 +141,7 @@ int NPUTransformPass::InsertPostNodes(kernel::LiteKernel *kernel, std::vector<ke
for (auto i = 0; i < post_insert_kernels.size(); ++i) {
auto post_insert_kernel = post_insert_kernels.at(i);
// perm tensor
auto nc2nh_perm_tensor = new Tensor(kNumberTypeInt32, {4}, schema::Format_NHWC, Tensor::CONST_TENSOR);
auto nc2nh_perm_tensor = new Tensor(kNumberTypeInt32, {4}, mindspore::NHWC, Tensor::CONST_TENSOR);
auto nc2nh_data = nc2nh_perm_tensor->MutableData();
if (nc2nh_data == nullptr) {
return RET_ERROR;

View File

@ -22,7 +22,7 @@ using mindspore::lite::RET_PARAM_INVALID;
namespace mindspore::kernel {
int DepthToSpaceBaseCPUKernel::ReSize() {
if (in_tensors_.at(0)->format() != schema::Format::Format_NHWC) {
if (in_tensors_.at(0)->format() != mindspore::NHWC) {
MS_LOG(ERROR) << "depth_to_space only support NHWC now!";
return RET_FORMAT_ERR;
}

View File

@ -62,8 +62,8 @@ static inline lite::Tensor *TensorMalloc(lite::Tensor *tensor) {
}
lite::Tensor *CreateConstTensor(lite::Tensor *tensor, const std::vector<int> &shape, const int index) {
auto new_tensor = new (std::nothrow)
lite::Tensor(tensor->data_type(), shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto new_tensor =
new (std::nothrow) lite::Tensor(tensor->data_type(), shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
if (new_tensor == nullptr) {
MS_LOG(ERROR) << "Create new_tensor failed.";
return nullptr;
@ -125,7 +125,7 @@ void GroupConvCreator::FreeGroupConvs() {
int GroupConvCreator::NewInputTensor(std::vector<lite::Tensor *> *tensors) {
auto in_tensor =
CreateVarTensor({input_shape_, schema::Format_NHWC, data_type_, lite::Tensor::Category::VAR, true}, infered_);
CreateVarTensor({input_shape_, mindspore::NHWC, data_type_, lite::Tensor::Category::VAR, true}, infered_);
if (in_tensor == nullptr) {
return lite::RET_ERROR;
}

View File

@ -25,7 +25,7 @@
namespace mindspore::kernel {
struct TensorInfo {
std::vector<int> shape_;
schema::Format format_;
mindspore::Format format_;
TypeId data_type_;
lite::Tensor::Category tensor_type_;
bool is_in_;

View File

@ -17,35 +17,34 @@
#include "src/runtime/kernel/arm/base/layout_transform.h"
#include "src/common/log_adapter.h"
using mindspore::schema::Format;
namespace mindspore::kernel {
LayoutConvertor LayoutTransformFp32(schema::Format src_format, schema::Format dst_format) {
if (src_format == schema::Format::Format_NHWC && dst_format == schema::Format::Format_NC4HW4) {
LayoutConvertor LayoutTransformFp32(mindspore::Format src_format, mindspore::Format dst_format) {
if (src_format == mindspore::NHWC && dst_format == mindspore::NC4HW4) {
return PackNHWCToNC4HW4Fp32;
} else if (src_format == schema::Format::Format_NHWC && dst_format == schema::Format::Format_NHWC4) {
} else if (src_format == mindspore::NHWC && dst_format == mindspore::NHWC4) {
return PackNHWCToNHWC4Fp32;
} else if (src_format == schema::Format::Format_NC4HW4 && dst_format == schema::Format::Format_NHWC4) {
} else if (src_format == mindspore::NC4HW4 && dst_format == mindspore::NHWC4) {
return PackNC4HW4ToNHWC4Fp32;
} else if (src_format == schema::Format::Format_NCHW && dst_format == schema::Format::Format_NC4HW4) {
} else if (src_format == mindspore::NCHW && dst_format == mindspore::NC4HW4) {
return PackNCHWToNC4HW4Fp32;
} else if (src_format == schema::Format::Format_NC4HW4 && dst_format == schema::Format::Format_NHWC) {
} else if (src_format == mindspore::NC4HW4 && dst_format == mindspore::NHWC) {
return PackNC4HW4ToNHWCFp32;
} else {
MS_LOG(ERROR) << "Unsupported transform from " << EnumNameFormat(src_format) << " to "
<< EnumNameFormat(dst_format);
MS_LOG(ERROR) << "Unsupported transform from " << EnumNameFormat(static_cast<schema::Format>(src_format)) << " to "
<< EnumNameFormat(static_cast<schema::Format>(dst_format));
return nullptr;
}
}
LayoutConvertor LayoutTransformInt8(schema::Format src_format, schema::Format dst_format) {
if (src_format == schema::Format::Format_NHWC && dst_format == schema::Format::Format_NHWC4) {
LayoutConvertor LayoutTransformInt8(mindspore::Format src_format, mindspore::Format dst_format) {
if (src_format == mindspore::NHWC && dst_format == mindspore::NHWC4) {
return PackNHWCToNHWC4Int8;
} else {
return nullptr;
}
}
LayoutConvertor LayoutTransform(TypeId data_type, schema::Format src_format, schema::Format dst_format) {
LayoutConvertor LayoutTransform(TypeId data_type, mindspore::Format src_format, mindspore::Format dst_format) {
switch (data_type) {
case kNumberTypeInt8:
return LayoutTransformInt8(src_format, dst_format);

View File

@ -21,20 +21,19 @@
#include <arm_neon.h>
#endif
#include "nnacl/pack.h"
#include "schema/ops_generated.h"
#include "src/tensor.h"
namespace mindspore::kernel {
typedef void (*LayoutConvertor)(const void *src, void *dst, int batch, int plane, int channel);
#ifdef ENABLE_FP16
LayoutConvertor LayoutTransformFp16(schema::Format src_format, schema::Format dst_format);
LayoutConvertor LayoutTransformFp16(mindspore::Format src_format, mindspore::Format dst_format);
#endif
LayoutConvertor LayoutTransformFp32(schema::Format src_format, schema::Format dst_format);
LayoutConvertor LayoutTransformFp32(mindspore::Format src_format, mindspore::Format dst_format);
LayoutConvertor LayoutTransformInt8(schema::Format src_format, schema::Format dst_format);
LayoutConvertor LayoutTransformInt8(mindspore::Format src_format, mindspore::Format dst_format);
LayoutConvertor LayoutTransform(TypeId data_type, schema::Format src_format, schema::Format dst_format);
LayoutConvertor LayoutTransform(TypeId data_type, mindspore::Format src_format, mindspore::Format dst_format);
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_LAYOUT_TRANSFORM_H_

View File

@ -32,7 +32,6 @@ using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_Conv2DFusion;
using mindspore::schema::Format::Format_NHWC;
namespace mindspore::kernel {
void ConvolutionDelegateFP16CPUKernel::FreeCopiedData() {

View File

@ -15,24 +15,23 @@
*/
#include "src/runtime/kernel/arm/fp16/layout_transform_fp16.h"
#include "nnacl/fp16/pack_fp16.h"
#include "schema/ops_generated.h"
#include "src/common/log_adapter.h"
namespace mindspore::kernel {
LayoutConvertor LayoutTransformFp16(schema::Format src_format, schema::Format dst_format) {
if (src_format == schema::Format::Format_NHWC && dst_format == schema::Format::Format_NC4HW4) {
LayoutConvertor LayoutTransformFp16(mindspore::Format src_format, mindspore::Format dst_format) {
if (src_format == mindspore::NHWC && dst_format == mindspore::NC4HW4) {
return PackNHWCToNC4HW4Fp16;
} else if (src_format == schema::Format::Format_NHWC && dst_format == schema::Format::Format_NHWC4) {
} else if (src_format == mindspore::NHWC && dst_format == mindspore::NHWC4) {
return PackNHWCToNHWC4Fp16;
} else if (src_format == schema::Format::Format_NC4HW4 && dst_format == schema::Format::Format_NHWC4) {
} else if (src_format == mindspore::NC4HW4 && dst_format == mindspore::NHWC4) {
return PackNC4HW4ToNHWC4Fp16;
} else if (src_format == schema::Format::Format_NCHW && dst_format == schema::Format::Format_NC4HW4) {
} else if (src_format == mindspore::NCHW && dst_format == mindspore::NC4HW4) {
return PackNCHWToNC4HW4Fp16;
} else if (src_format == schema::Format::Format_NC4HW4 && dst_format == schema::Format::Format_NHWC) {
} else if (src_format == mindspore::NC4HW4 && dst_format == mindspore::NHWC) {
return PackNC4HW4ToNHWCFp16;
} else {
MS_LOG(ERROR) << "Unsupported transform from " << EnumNameFormat(src_format) << " to "
<< EnumNameFormat(dst_format);
MS_LOG(ERROR) << "Unsupported transform from " << EnumNameFormat(static_cast<schema::Format>(src_format)) << " to "
<< EnumNameFormat(static_cast<schema::Format>(dst_format));
return nullptr;
}
}

View File

@ -18,10 +18,9 @@
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP16_LAYOUT_TRANSFORM_FP16_H_
#include "src/runtime/kernel/arm/base/layout_transform.h"
#include "schema/ops_generated.h"
namespace mindspore::kernel {
LayoutConvertor LayoutTransformFp16(schema::Format src_format, schema::Format dst_format);
LayoutConvertor LayoutTransformFp16(mindspore::Format src_format, mindspore::Format dst_format);
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP16_LAYOUT_TRANSFORM_FP16_H_

View File

@ -45,7 +45,7 @@ int BatchToSpaceCPUKernel::Processinput() {
}
int BatchToSpaceCPUKernel::Init() {
MS_ASSERT(in_tensors_.at(0)->format() == schema::Format::Format_NHWC);
MS_ASSERT(in_tensors_.at(0)->format() == mindspore::NHWC);
if (!InferShapeDone()) {
return RET_OK;
}

View File

@ -41,7 +41,7 @@ int DepthToSpaceCPUKernel::Run() {
const float *input_data = reinterpret_cast<const float *>(input->data_c());
float *output_data = reinterpret_cast<float *>(output->data_c());
auto in_shape = input->shape();
if (input->format() == schema::Format::Format_NHWC) {
if (input->format() == mindspore::NHWC) {
DepthToSpaceForNHWC(input_data, output_data, in_shape.data(), param_);
return RET_OK;
} else {

View File

@ -45,7 +45,7 @@ int SpaceToDepthCPUKernel::Init() {
}
int SpaceToDepthCPUKernel::ReSize() {
if (in_tensors_.at(0)->format() != schema::Format::Format_NHWC) {
if (in_tensors_.at(0)->format() != mindspore::NHWC) {
MS_LOG(ERROR) << "space_to_depth only support NHWC now!";
return RET_FORMAT_ERR;
}
@ -90,7 +90,7 @@ int SpaceToDepthRun(void *cdata, int task_id, float lhs_scale, float rhs_scale)
int SpaceToDepthCPUKernel::Run() {
input_ptr_ = reinterpret_cast<float *>(in_tensors_.at(0)->data_c());
output_ptr_ = reinterpret_cast<float *>(out_tensors_.at(0)->data_c());
if (in_tensors_.at(0)->format() == schema::Format::Format_NHWC) {
if (in_tensors_.at(0)->format() == mindspore::NHWC) {
auto ret = static_cast<const lite::InnerContext *>(this->context_)
->thread_pool_->ParallelLaunch(SpaceToDepthRun, this, thread_h_num_);
if (ret != RET_OK) {

View File

@ -36,7 +36,7 @@ BatchToSpaceInt8CPUKernel::~BatchToSpaceInt8CPUKernel() {
}
int BatchToSpaceInt8CPUKernel::Init() {
MS_ASSERT(in_tensors_.at(0)->format() == schema::Format::Format_NHWC);
MS_ASSERT(in_tensors_.at(0)->format() == mindspore::NHWC);
in_quant_arg_ = reinterpret_cast<QuantArg *>(malloc(sizeof(QuantArg)));
if (in_quant_arg_ == nullptr) {
MS_LOG(ERROR) << "Malloc QuantArg for BatchToSpace int8 op failed!";

View File

@ -83,10 +83,10 @@ int ConvolutionBaseNPUKernel::InitBiasConst(const std::vector<lite::Tensor *> &i
MS_LOG(ERROR) << "New bias const failed.";
return RET_ERROR;
}
inputs[BIAS_INDEX]->set_format(schema::Format_NCHW);
inputs[BIAS_INDEX]->set_format(mindspore::NCHW);
auto bias_tensor = mindspore::lite::ConverterToNPUTensor(inputs[BIAS_INDEX]);
bias_->set_attr_value(bias_tensor);
inputs[BIAS_INDEX]->set_format(schema::Format_NHWC);
inputs[BIAS_INDEX]->set_format(mindspore::NHWC);
}
return RET_OK;
}

View File

@ -62,10 +62,10 @@ int FullconnectionNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inp
MS_LOG(ERROR) << "New weight const failed.";
return RET_ERROR;
}
inputs[1]->set_format(schema::Format_NCHW);
inputs[1]->set_format(mindspore::NCHW);
auto weight_tensor = mindspore::lite::ConverterToNPUTensor(inputs[1]);
weight_->set_attr_value(weight_tensor);
inputs[1]->set_format(schema::Format_NHWC);
inputs[1]->set_format(mindspore::NHWC);
fc_->set_input_x2(*weight_).set_attr_transpose_x2(true);
if (fc_param_->has_bias_) {

View File

@ -21,7 +21,6 @@
using mindspore::kernel::KERNEL_ARCH::kNPU;
using mindspore::lite::KernelRegistrar;
using mindspore::schema::Format_NHWC;
using mindspore::schema::PrimitiveType_ScaleFusion;
namespace mindspore::kernel {
@ -37,7 +36,7 @@ int ScaleNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, const s
if (scale_parameter_->axis_ < 0) {
scale_parameter_->axis_ = scale_parameter_->axis_ + inputs[0]->shape().size();
}
if (inputs.size() > 1 && inputs[0]->shape().size() == DIMS_4D && inputs[0]->format() == schema::Format_NHWC) {
if (inputs.size() > 1 && inputs[0]->shape().size() == DIMS_4D && inputs[0]->format() == mindspore::NHWC) {
// scale now only supports on axis 3
if (scale_parameter_->axis_ != 3) {
MS_LOG(ERROR) << "Npu scale axis attr only support on channel, now is " << scale_parameter_->axis_;

View File

@ -113,7 +113,7 @@ int ScaleOpenCLKernel::InitWeights() {
<< in_tensor->data_type();
return RET_ERROR;
}
} else if (in_tensor->format() == schema::Format_NHWC && scale_tensor->format() == schema::Format_NHWC) {
} else if (in_tensor->format() == mindspore::NHWC && scale_tensor->format() == mindspore::NHWC) {
if (scale_dtype == kNumberTypeFloat32 || scale_dtype == kNumberTypeFloat16) {
auto image2d_info = GpuTensorInfo(scale_tensor);
int pack_weight_size = image2d_info.ElementsC4Num;

View File

@ -52,7 +52,7 @@ int ParallelExecutor::Run(const std::vector<Tensor *> &in_tensors, const std::ve
MS_LOG(ERROR) << "Graph input tensor is nullptr";
return RET_ERROR;
}
if (inTensor->format() != schema::Format::Format_NHWC) {
if (inTensor->format() != mindspore::NHWC) {
MS_LOG(ERROR) << "Model input tensor should be NHWC";
return RET_ERROR;
}

View File

@ -14,12 +14,12 @@
* limitations under the License.
*/
#include "src/tensor.h"
#include <vector>
#include <string>
#include <utility>
#include <algorithm>
#include <functional>
#include "src/tensor.h"
#include "securec/include/securec.h"
#include "include/errorcode.h"
@ -28,7 +28,7 @@ namespace lite {
namespace {
constexpr int kMaxMallocSize = 1024 * 1024 * 300;
} // namespace
Tensor::Tensor(const TypeId data_type, std::vector<int> shape, const schema::Format &format, Category category)
Tensor::Tensor(const TypeId data_type, std::vector<int> shape, const mindspore::Format &format, Category category)
: data_type_(data_type), shape_(std::move(shape)), format_(format), category_(category) {}
int Tensor::CopyTensorData(const Tensor &src_tensor, Tensor *dst_tensor) {
@ -95,24 +95,24 @@ int32_t Tensor::Batch() const {
return RET_ERROR;
}
switch (this->format_) {
case schema::Format::Format_NHWC:
case schema::Format::Format_NHWC4:
case schema::Format::Format_NCHW:
case schema::Format::Format_NC4HW4:
case schema::Format::Format_KCHW:
case schema::Format::Format_KHWC:
case schema::Format::Format_NC:
case schema::Format::Format_NC4:
case mindspore::NHWC:
case mindspore::NHWC4:
case mindspore::NCHW:
case mindspore::NC4HW4:
case mindspore::KCHW:
case mindspore::KHWC:
case mindspore::NC:
case mindspore::NC4:
return this->shape_[0];
case schema::Format::Format_HWCK:
case schema::Format::Format_CHWK:
case mindspore::HWCK:
case mindspore::CHWK:
return this->shape_[3];
case schema::Format::Format_HWKC:
case mindspore::HWKC:
return this->shape_[2];
case schema::Format::Format_CKHW:
case mindspore::CKHW:
return this->shape_[1];
default:
MS_LOG(ERROR) << "Unsupported format: " << EnumNameFormat(this->format_);
MS_LOG(ERROR) << "Unsupported format: " << EnumNameFormat(static_cast<schema::Format>(this->format_));
return RET_ERROR;
}
}
@ -123,21 +123,21 @@ int32_t Tensor::Channel() const {
return RET_ERROR;
}
switch (this->format_) {
case schema::Format::Format_NCHW:
case schema::Format::Format_KCHW:
case schema::Format::Format_NC:
case schema::Format::Format_NC4:
case mindspore::NCHW:
case mindspore::KCHW:
case mindspore::NC:
case mindspore::NC4:
return this->shape_[1];
case schema::Format::Format_HWCK:
case mindspore::HWCK:
return this->shape_[2];
case schema::Format::Format_HWKC:
case schema::Format::Format_NHWC:
case schema::Format::Format_NHWC4:
case schema::Format::Format_NC4HW4:
case schema::Format::Format_KHWC:
case mindspore::HWKC:
case mindspore::NHWC:
case mindspore::NHWC4:
case mindspore::NC4HW4:
case mindspore::KHWC:
return this->shape_[3];
case schema::Format::Format_CKHW:
case schema::Format::Format_CHWK:
case mindspore::CKHW:
case mindspore::CHWK:
return this->shape_[0];
default:
return RET_ERROR;
@ -150,23 +150,23 @@ int32_t Tensor::Height() const {
return RET_ERROR;
}
switch (this->format_) {
case schema::Format::Format_NCHW:
case schema::Format::Format_KCHW:
case schema::Format::Format_CKHW:
case mindspore::NCHW:
case mindspore::KCHW:
case mindspore::CKHW:
return this->shape_[2];
case schema::Format::Format_NHWC:
case schema::Format::Format_NHWC4:
case schema::Format::Format_NC4HW4:
case schema::Format::Format_KHWC:
case schema::Format::Format_CHWK:
case mindspore::NHWC:
case mindspore::NHWC4:
case mindspore::NC4HW4:
case mindspore::KHWC:
case mindspore::CHWK:
return this->shape_[1];
case schema::Format::Format_HWCK:
case schema::Format::Format_HWKC:
case schema::Format::Format_HW:
case schema::Format::Format_HW4:
case mindspore::HWCK:
case mindspore::HWKC:
case mindspore::HW:
case mindspore::HW4:
return this->shape_[0];
default:
MS_LOG(ERROR) << "Unsupported format: " << EnumNameFormat(this->format_);
MS_LOG(ERROR) << "Unsupported format: " << EnumNameFormat(static_cast<schema::Format>(this->format_));
return RET_ERROR;
}
}
@ -177,20 +177,20 @@ int32_t Tensor::Width() const {
return -1;
}
switch (this->format_) {
case schema::Format::Format_NCHW:
case schema::Format::Format_KCHW:
case schema::Format::Format_CKHW:
case mindspore::NCHW:
case mindspore::KCHW:
case mindspore::CKHW:
return this->shape_[3];
case schema::Format::Format_KHWC:
case schema::Format::Format_NHWC:
case schema::Format::Format_NHWC4:
case schema::Format::Format_NC4HW4:
case schema::Format::Format_CHWK:
case mindspore::KHWC:
case mindspore::NHWC:
case mindspore::NHWC4:
case mindspore::NC4HW4:
case mindspore::CHWK:
return this->shape_[2];
case schema::Format::Format_HWCK:
case schema::Format::Format_HWKC:
case schema::Format::Format_HW:
case schema::Format::Format_HW4:
case mindspore::HWCK:
case mindspore::HWKC:
case mindspore::HW:
case mindspore::HW4:
return this->shape_[1];
default:
return RET_ERROR;
@ -199,9 +199,7 @@ int32_t Tensor::Width() const {
size_t Tensor::Size() const {
size_t element_size = DataTypeSize(this->data_type_);
auto element_num = (format_ == schema::Format::Format_NC4HW4 || format_ == schema::Format::Format_NHWC4)
? ElementsC4Num()
: ElementsNum();
auto element_num = (format_ == mindspore::NC4HW4 || format_ == mindspore::NHWC4) ? ElementsC4Num() : ElementsNum();
if (element_num < 0) {
MS_LOG(ERROR) << "Element number of tensor should large than 0 : " << element_num;
return 0;
@ -241,7 +239,7 @@ int Tensor::DimensionSize(const size_t index) const {
std::string Tensor::ToString() const {
std::ostringstream oss;
oss << "schema::Format: " << EnumNameFormat(this->format_);
oss << "schema::Format: " << EnumNameFormat(static_cast<schema::Format>(this->format_));
oss << " DataType: " << this->data_type_;
oss << " Category: " << this->category_;
oss << " Shape:";

View File

@ -24,6 +24,7 @@
#include <functional>
#include <atomic>
#include "include/ms_tensor.h"
#include "ir/format.h"
#include "src/runtime/inner_allocator.h"
#include "src/common/log_adapter.h"
@ -56,7 +57,7 @@ class Tensor : public mindspore::tensor::MSTensor {
};
Tensor() = default;
Tensor(TypeId data_type, std::vector<int> shape, const schema::Format &format = schema::Format::Format_NHWC,
Tensor(TypeId data_type, std::vector<int> shape, const mindspore::Format &format = mindspore::NHWC,
Category category = VAR);
Tensor(const Tensor &tensor) = delete;
@ -133,9 +134,9 @@ class Tensor : public mindspore::tensor::MSTensor {
void set_category(Category category) { this->category_ = category; }
void set_format(schema::Format format) { this->format_ = format; }
void set_format(mindspore::Format format) override { this->format_ = format; }
schema::Format format() const { return this->format_; }
mindspore::Format format() const override { return this->format_; }
virtual int ref_count() const { return ref_count_; }
@ -215,7 +216,7 @@ class Tensor : public mindspore::tensor::MSTensor {
void *data_ = nullptr;
TypeId data_type_;
std::vector<int> shape_;
schema::Format format_;
mindspore::Format format_;
Category category_;
std::atomic_int ref_count_ = 0;
int init_ref_count_ = 0;

View File

@ -24,7 +24,7 @@
namespace mindspore::lite {
TensorList::TensorList(std::vector<int> shape, std::vector<int> element_shape, Category category)
: Tensor(kObjectTypeTensorType, std::move(shape), schema::Format::Format_NHWC, category),
: Tensor(kObjectTypeTensorType, std::move(shape), mindspore::NHWC, category),
element_shape_(std::move(element_shape)) {}
TensorList::~TensorList() {

View File

@ -117,7 +117,7 @@ std::unique_ptr<schema::TensorT> TrainExport::CreateTensor(const mindspore::lite
auto tensorT = std::make_unique<schema::TensorT>();
tensorT->nodeType = scTensor->nodeType();
tensorT->dims = tensor->shape();
tensorT->format = tensor->format();
tensorT->format = static_cast<schema::Format>(tensor->format());
tensorT->name = tensor->tensor_name();
tensorT->refCount = 0;
tensorT->offset = 0;

View File

@ -142,7 +142,12 @@ int TrainSession::RunGraph(const KernelCallBack &before, const KernelCallBack &a
}
auto run_kernel = (train_mode_) ? train_kernels_ : inference_kernels_;
auto ret = CheckTensorsInvalid(inputs_);
auto ret = CheckGraphInputFormat(inputs_);
if (ret != RET_OK) {
MS_LOG(ERROR) << "model input's format mey be changed, which should be NHWC.";
return ret;
}
ret = CheckTensorsInvalid(inputs_);
if (ret != RET_OK) {
MS_LOG(ERROR) << "CheckInputs failed";
return ret;

View File

@ -27,7 +27,7 @@ class TestConstantOfShapeFp32 : public mindspore::CommonTest {
int ConstantOfShapeTestInit(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_, float *a_ptr,
std::vector<int> a_shape) {
auto in_t = new lite::Tensor(kNumberTypeInt32, a_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto in_t = new lite::Tensor(kNumberTypeInt32, a_shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
in_t->MallocData();
memcpy(in_t->MutableData(), a_ptr, sizeof(float) * in_t->ElementsNum());
inputs_->push_back(in_t);
@ -36,7 +36,7 @@ int ConstantOfShapeTestInit(std::vector<lite::Tensor *> *inputs_, std::vector<li
for (unsigned int i = 0; i < c_shape.size(); ++i) {
c_shape[i] = a_ptr[i];
}
auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
out_t->MallocData();
outputs_->push_back(out_t);

View File

@ -60,7 +60,7 @@ void InitConvDwCreator(std::vector<lite::Tensor *> *inputs, std::vector<lite::Te
auto *input = new lite::Tensor;
input->set_data_type(kNumberTypeFloat32);
input->set_format(schema::Format_NHWC);
input->set_format(mindspore::NHWC);
input->set_shape({conv_param->input_batch_, conv_param->input_h_, conv_param->input_w_, conv_param->input_channel_});
input->MallocData();
memcpy(input->MutableData(), input_data, input_size);
@ -91,7 +91,7 @@ void InitConvDwCreator(std::vector<lite::Tensor *> *inputs, std::vector<lite::Te
output->set_data_type(kNumberTypeFloat32);
output->set_shape(
{conv_param->output_batch_, conv_param->output_h_, conv_param->output_w_, conv_param->output_channel_});
output->set_format(schema::Format_NHWC);
output->set_format(mindspore::NHWC);
output->MallocData();
memset(output->MutableData(), 0, output->ElementsNum() * sizeof(float));
outputs->push_back(output);

View File

@ -257,12 +257,12 @@ TEST_F(CropTestFp32, CropTest11) {
std::vector<int> out_shape = {1, 4, 2, 2};
std::vector<lite::Tensor *> inputs;
std::vector<lite::Tensor *> outputs;
auto in_t = new lite::Tensor(kNumberTypeFloat, in_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto in_t = new lite::Tensor(kNumberTypeFloat, in_shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
in_t->MallocData();
memcpy(in_t->MutableData(), input, sizeof(float) * in_t->ElementsNum());
inputs.push_back(in_t);
auto out_t = new lite::Tensor(kNumberTypeFloat, out_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto out_t = new lite::Tensor(kNumberTypeFloat, out_shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
out_t->MallocData();
outputs.push_back(out_t);

View File

@ -323,8 +323,7 @@ TEST_F(TestDeConvolutionFp32, PostConvFuncC8Test8_8) {
int DeConvTestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_,
ConvParameter *conv_param, float **correct) {
std::vector<int> in_dims_nhwc = {1, 5, 7, 2};
auto *in_t =
new lite::Tensor(kNumberTypeFloat, in_dims_nhwc, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto *in_t = new lite::Tensor(kNumberTypeFloat, in_dims_nhwc, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
in_t->MallocData();
float in_nchw[] = {
0.39451003, 0.15045597, 0.5367726, 0.62690735, 0.113554195, 0.5402554, 0.5522764, 0.044319753, 0.25721782,
@ -341,7 +340,7 @@ int DeConvTestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tens
std::vector<int> weight_dims_nhwc = {2, 3, 3, 6};
auto *weight_t =
new lite::Tensor(kNumberTypeFloat, weight_dims_nhwc, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
new lite::Tensor(kNumberTypeFloat, weight_dims_nhwc, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
weight_t->MallocData();
float weight_nchw[] = {
0.061163727, -0.06261389, 0.07708351, -0.019354159, -0.3859104, -0.082844816, -0.21268463, -0.15746808,
@ -362,7 +361,7 @@ int DeConvTestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tens
weight_t->Channel(), 0, 0);
inputs_->push_back(weight_t);
auto *bias_t = new lite::Tensor(kNumberTypeFloat, {6}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto *bias_t = new lite::Tensor(kNumberTypeFloat, {6}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
bias_t->MallocData();
float bias[] = {-0.19064677, -0.0034778118, 0.63741624, -1.0311537, -1.0288948, 0.71384084};
memcpy(bias_t->MutableData(), bias, sizeof(float) * 6);
@ -370,7 +369,7 @@ int DeConvTestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tens
std::vector<int> output_nhwc_dims = {1, 9, 13, 6};
auto *out_t =
new lite::Tensor(kNumberTypeFloat, output_nhwc_dims, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
new lite::Tensor(kNumberTypeFloat, output_nhwc_dims, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
out_t->MallocData();
outputs_->push_back(out_t);
@ -499,7 +498,7 @@ TEST_F(TestDeConvolutionFp32, DeConvTest1) {
int DeConvTestInit2(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_,
ConvParameter *conv_param, float **correct) {
auto *in_t = new lite::Tensor(kNumberTypeFloat, {1, 4, 2, 3}, schema::Format_NHWC, lite::Tensor::Category::VAR);
auto *in_t = new lite::Tensor(kNumberTypeFloat, {1, 4, 2, 3}, mindspore::NHWC, lite::Tensor::Category::VAR);
in_t->MallocData();
float in[] = {7.7566547, 19.250782, 17.923292, 13.584222, 3.3293908, 9.734102, 18.83455, -1.5142503,
-0.29382008, 18.686155, 0.087307654, 4.2010098, -2.2539594, 4.1795673, 13.142356, -3.5939367,
@ -508,7 +507,7 @@ int DeConvTestInit2(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tens
inputs_->push_back(in_t);
auto *weight_t =
new lite::Tensor(kNumberTypeFloat, {3, 3, 3, 2}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
new lite::Tensor(kNumberTypeFloat, {3, 3, 3, 2}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
weight_t->MallocData();
float weight[] = {-0.39557076, 0.15087655, 0.35216075, -0.20893791, 0.28683448, 0.08006268, 0.9830812,
0.27212173, 0.5171944, -0.0014505, 0.78694165, 0.25425306, 0.16605458, -0.06127124,
@ -522,7 +521,7 @@ int DeConvTestInit2(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tens
inputs_->push_back(weight_t);
std::vector<int> out_nhwc_dims = {1, 7, 3, 2};
auto *out_t = new lite::Tensor(kNumberTypeFloat, out_nhwc_dims, schema::Format_NHWC, lite::Tensor::Category::VAR);
auto *out_t = new lite::Tensor(kNumberTypeFloat, out_nhwc_dims, mindspore::NHWC, lite::Tensor::Category::VAR);
out_t->MallocData();
outputs_->push_back(out_t);
@ -568,7 +567,7 @@ TEST_F(TestDeConvolutionFp32, DeConvTest2) {
int DeConvTestInit3(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_,
ConvParameter *conv_param, float **correct) {
std::vector<int> in_dims_nhwc = {1, 3, 3, 2};
auto *in_t = new lite::Tensor(kNumberTypeFloat, in_dims_nhwc, schema::Format_NHWC, lite::Tensor::Category::VAR);
auto *in_t = new lite::Tensor(kNumberTypeFloat, in_dims_nhwc, mindspore::NHWC, lite::Tensor::Category::VAR);
in_t->MallocData();
float in_nchw[] = {0.10411751, 0.24034509, 0.71456534, 0.75286126, 0.9778457, 0.21043599,
0.26498786, 0.6701024, 0.9744634, 0.49075702, 0.03877404, 0.48646277,
@ -579,7 +578,7 @@ int DeConvTestInit3(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tens
std::vector<int> w_dims_nhwc = {2, 2, 2, 2};
auto *weight_t =
new lite::Tensor(kNumberTypeFloat, w_dims_nhwc, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
new lite::Tensor(kNumberTypeFloat, w_dims_nhwc, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
weight_t->MallocData();
float w_nchw[] = {-0.108016446, -0.44254777, 0.29249913, 0.18764605, 1.1250675, 0.29441583,
-0.34362152, 0.7557833, 0.16503833, 0.2418737, -0.26612744, 0.5072577,
@ -589,7 +588,7 @@ int DeConvTestInit3(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tens
inputs_->push_back(weight_t);
std::vector<int> out_dims_nhwc = {1, 9, 9, 2};
auto *out_t = new lite::Tensor(kNumberTypeFloat, out_dims_nhwc, schema::Format_NC4HW4, lite::Tensor::Category::VAR);
auto *out_t = new lite::Tensor(kNumberTypeFloat, out_dims_nhwc, mindspore::NC4HW4, lite::Tensor::Category::VAR);
out_t->MallocData();
outputs_->push_back(out_t);
@ -648,7 +647,7 @@ int DeConvTestInit4(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tens
ConvParameter *conv_param, float **correct) {
size_t buffer_size;
std::vector<int> in_nhwc_dims = {1, 300, 300, 30};
auto *in_t = new lite::Tensor(kNumberTypeFloat, in_nhwc_dims, schema::Format_NHWC, lite::Tensor::Category::VAR);
auto *in_t = new lite::Tensor(kNumberTypeFloat, in_nhwc_dims, mindspore::NHWC, lite::Tensor::Category::VAR);
in_t->MallocData();
std::string in_nhwc_path = "./deconv/deconv_fp32_nhwc_input1.bin";
auto in_nhwc = reinterpret_cast<float *>(mindspore::lite::ReadFile(in_nhwc_path.c_str(), &buffer_size));
@ -657,7 +656,7 @@ int DeConvTestInit4(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tens
std::vector<int> w_nhwc_dims = {30, 3, 3, 40};
auto *weight_t =
new lite::Tensor(kNumberTypeFloat, w_nhwc_dims, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
new lite::Tensor(kNumberTypeFloat, w_nhwc_dims, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
weight_t->MallocData();
std::string weight_path = "./deconv/deconv_fp32_nchw_weight1.bin";
auto weight_nchw = reinterpret_cast<float *>(mindspore::lite::ReadFile(weight_path.c_str(), &buffer_size));
@ -665,7 +664,7 @@ int DeConvTestInit4(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tens
weight_t->Channel(), 0, 0);
inputs_->push_back(weight_t);
auto *bias_t = new lite::Tensor(kNumberTypeFloat, {40}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto *bias_t = new lite::Tensor(kNumberTypeFloat, {40}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
bias_t->MallocData();
std::string bias_path = "./deconv/deconv_fp32_nchw_bias1.bin";
auto bias = mindspore::lite::ReadFile(bias_path.c_str(), &buffer_size);
@ -673,7 +672,7 @@ int DeConvTestInit4(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tens
inputs_->push_back(bias_t);
std::vector<int> out_nhwc_dims = {1, 302, 302, 40};
auto *out_t = new lite::Tensor(kNumberTypeFloat, out_nhwc_dims, schema::Format_NHWC, lite::Tensor::Category::VAR);
auto *out_t = new lite::Tensor(kNumberTypeFloat, out_nhwc_dims, mindspore::NHWC, lite::Tensor::Category::VAR);
out_t->MallocData();
outputs_->push_back(out_t);

View File

@ -35,7 +35,7 @@ void DetectionPostProcessTestInit(std::vector<lite::Tensor *> *inputs_, std::vec
reinterpret_cast<float *>(mindspore::lite::ReadFile(input_boxes_path.c_str(), &input_boxes_size));
auto *input_boxes = new lite::Tensor;
input_boxes->set_data_type(kNumberTypeFloat32);
input_boxes->set_format(schema::Format_NHWC);
input_boxes->set_format(mindspore::NHWC);
input_boxes->set_shape({1, 1917, 4});
input_boxes->MallocData();
memcpy(input_boxes->MutableData(), input_boxes_data, input_boxes_size);
@ -47,7 +47,7 @@ void DetectionPostProcessTestInit(std::vector<lite::Tensor *> *inputs_, std::vec
reinterpret_cast<float *>(mindspore::lite::ReadFile(input_scores_path.c_str(), &input_scores_size));
auto *input_scores = new lite::Tensor;
input_scores->set_data_type(kNumberTypeFloat32);
input_scores->set_format(schema::Format_NHWC);
input_scores->set_format(mindspore::NHWC);
input_scores->set_shape({1, 1917, 91});
input_scores->MallocData();
memcpy(input_scores->MutableData(), input_scores_data, input_scores_size);
@ -63,7 +63,7 @@ void DetectionPostProcessTestInit(std::vector<lite::Tensor *> *inputs_, std::vec
quant_arg.scale = 0.00645306;
input_anchors->AddQuantParam(quant_arg);
input_anchors->set_data_type(kNumberTypeUInt8);
input_anchors->set_format(schema::Format_NHWC);
input_anchors->set_format(mindspore::NHWC);
input_anchors->set_shape({1917, 4});
input_anchors->MallocData();
memcpy(input_anchors->MutableData(), input_anchors_data, input_anchors_size);
@ -72,28 +72,28 @@ void DetectionPostProcessTestInit(std::vector<lite::Tensor *> *inputs_, std::vec
auto *output_boxes = new lite::Tensor;
output_boxes->set_data_type(kNumberTypeFloat32);
output_boxes->set_shape({1, 10, 4});
output_boxes->set_format(schema::Format_NHWC);
output_boxes->set_format(mindspore::NHWC);
output_boxes->MallocData();
memset(output_boxes->MutableData(), 0, output_boxes->ElementsNum() * sizeof(float));
auto *output_classes = new lite::Tensor;
output_classes->set_data_type(kNumberTypeFloat32);
output_classes->set_shape({1, 10});
output_classes->set_format(schema::Format_NHWC);
output_classes->set_format(mindspore::NHWC);
output_classes->MallocData();
memset(output_classes->MutableData(), 0, output_classes->ElementsNum() * sizeof(float));
auto *output_scores = new lite::Tensor;
output_scores->set_data_type(kNumberTypeFloat32);
output_scores->set_shape({1, 10});
output_scores->set_format(schema::Format_NHWC);
output_scores->set_format(mindspore::NHWC);
output_scores->MallocData();
memset(output_scores->MutableData(), 0, output_scores->ElementsNum() * sizeof(float));
auto *output_num_det = new lite::Tensor;
output_num_det->set_data_type(kNumberTypeFloat32);
output_num_det->set_shape({1});
output_num_det->set_format(schema::Format_NHWC);
output_num_det->set_format(mindspore::NHWC);
output_num_det->MallocData();
memset(output_num_det->MutableData(), 0, output_num_det->ElementsNum() * sizeof(float));

View File

@ -30,14 +30,13 @@ class TestEluFp32 : public mindspore::CommonTest {
};
void EluTestInit(std::vector<Tensor *> *inputs_, std::vector<Tensor *> *outputs_, EluParameter *elu_param) {
Tensor *in_t_first =
new Tensor(kNumberTypeFloat32, {6, 2}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
Tensor *in_t_first = new Tensor(kNumberTypeFloat32, {6, 2}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
in_t_first->MallocData();
float in_first[] = {-1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11, 0};
memcpy(in_t_first->MutableData(), in_first, sizeof(float) * in_t_first->ElementsNum());
inputs_->push_back(in_t_first);
Tensor *outputs_t = new Tensor(kNumberTypeFloat32, {6, 2}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
Tensor *outputs_t = new Tensor(kNumberTypeFloat32, {6, 2}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
outputs_t->MallocData();
outputs_->push_back(outputs_t);

View File

@ -31,28 +31,25 @@ class TestEmbeddingLookupFp32 : public mindspore::CommonTest {
void ElTestInit(std::vector<Tensor *> *inputs_, std::vector<Tensor *> *outputs_,
EmbeddingLookupParameter *embedding_lookup_param) {
Tensor *in_t_first =
new Tensor(kNumberTypeFloat32, {6, 2}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
Tensor *in_t_first = new Tensor(kNumberTypeFloat32, {6, 2}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
in_t_first->MallocData();
float in_first[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
memcpy(in_t_first->MutableData(), in_first, sizeof(float) * in_t_first->ElementsNum());
inputs_->push_back(in_t_first);
Tensor *in_t_second =
new Tensor(kNumberTypeFloat32, {4, 2}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
Tensor *in_t_second = new Tensor(kNumberTypeFloat32, {4, 2}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
in_t_second->MallocData();
float in_second[] = {1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8};
memcpy(in_t_second->MutableData(), in_second, sizeof(float) * in_t_second->ElementsNum());
inputs_->push_back(in_t_second);
Tensor *ids_t = new Tensor(kNumberTypeFloat32, {2, 3}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
Tensor *ids_t = new Tensor(kNumberTypeFloat32, {2, 3}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
ids_t->MallocData();
int ids[] = {1, 9, 2, 4, 6, 7};
memcpy(ids_t->MutableData(), ids, sizeof(int) * ids_t->ElementsNum());
inputs_->push_back(ids_t);
Tensor *outputs_t =
new Tensor(kNumberTypeInt32, {2, 3, 2}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
Tensor *outputs_t = new Tensor(kNumberTypeInt32, {2, 3, 2}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
outputs_t->MallocData();
outputs_->push_back(outputs_t);

View File

@ -34,14 +34,14 @@ class TestFcFp32 : public mindspore::CommonTest {
int FcTestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_,
MatMulParameter *matmal_param, float **correct) {
auto *in_t = new Tensor(kNumberTypeFloat, {2, 2, 2, 2}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto *in_t = new Tensor(kNumberTypeFloat, {2, 2, 2, 2}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
in_t->MallocData();
float in[] = {-3.2366564, -4.7733846, -7.8329225, 16.146885, 5.060793, -6.1471, -1.7680453, -6.5721383,
17.87506, -5.1192183, 10.742863, 1.4536934, 19.693445, 19.45783, 5.063163, 0.5234792};
memcpy(in_t->MutableData(), in, sizeof(float) * in_t->ElementsNum());
inputs_->push_back(in_t);
auto *weight_t = new Tensor(kNumberTypeFloat, {3, 8}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto *weight_t = new Tensor(kNumberTypeFloat, {3, 8}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
weight_t->MallocData();
float weight[] = {-0.0024438887, 0.0006738146, -0.008169129, 0.0021510671, -0.012470592, -0.0053063435,
0.006050155, 0.008656233, 0.012911413, -0.0028635843, -0.00034080597, -0.0010622552,
@ -50,13 +50,13 @@ int FcTestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *
memcpy(weight_t->MutableData(), weight, sizeof(float) * weight_t->ElementsNum());
inputs_->push_back(weight_t);
auto *bias_t = new Tensor(kNumberTypeFloat, {3}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto *bias_t = new Tensor(kNumberTypeFloat, {3}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
bias_t->MallocData();
float bias[] = {1.6103756, -0.9872417, 0.546849};
memcpy(bias_t->MutableData(), bias, sizeof(float) * bias_t->ElementsNum());
inputs_->push_back(bias_t);
auto *out_t = new Tensor(kNumberTypeFloat, {2, 3}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto *out_t = new Tensor(kNumberTypeFloat, {2, 3}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
out_t->MallocData();
outputs_->push_back(out_t);
@ -95,28 +95,28 @@ int FcTestInit2(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *
MatMulParameter *matmal_param, float **correct) {
size_t buffer_size;
auto *in_t = new Tensor(kNumberTypeFloat, {20, 4, 2, 10}, schema::Format_NCHW, lite::Tensor::Category::CONST_TENSOR);
auto *in_t = new Tensor(kNumberTypeFloat, {20, 4, 2, 10}, mindspore::NCHW, lite::Tensor::Category::CONST_TENSOR);
in_t->MallocData();
std::string in_path = "./matmul/FcFp32_input1.bin";
auto in_data = mindspore::lite::ReadFile(in_path.c_str(), &buffer_size);
memcpy(in_t->MutableData(), in_data, buffer_size);
inputs_->push_back(in_t);
auto *weight_t = new Tensor(kNumberTypeFloat, {30, 80}, schema::Format_NCHW, lite::Tensor::Category::CONST_TENSOR);
auto *weight_t = new Tensor(kNumberTypeFloat, {30, 80}, mindspore::NCHW, lite::Tensor::Category::CONST_TENSOR);
weight_t->MallocData();
std::string weight_path = "./matmul/FcFp32_weight1.bin";
auto w_data = mindspore::lite::ReadFile(weight_path.c_str(), &buffer_size);
memcpy(weight_t->MutableData(), w_data, buffer_size);
inputs_->push_back(weight_t);
auto *bias_t = new Tensor(kNumberTypeFloat, {30}, schema::Format_NCHW, lite::Tensor::Category::CONST_TENSOR);
auto *bias_t = new Tensor(kNumberTypeFloat, {30}, mindspore::NCHW, lite::Tensor::Category::CONST_TENSOR);
bias_t->MallocData();
std::string bias_path = "./matmul/FcFp32_bias1.bin";
auto bias_data = mindspore::lite::ReadFile(bias_path.c_str(), &buffer_size);
memcpy(bias_t->MutableData(), bias_data, buffer_size);
inputs_->push_back(bias_t);
auto *out_t = new Tensor(kNumberTypeFloat, {20, 30}, schema::Format_NCHW, lite::Tensor::Category::CONST_TENSOR);
auto *out_t = new Tensor(kNumberTypeFloat, {20, 30}, mindspore::NCHW, lite::Tensor::Category::CONST_TENSOR);
out_t->MallocData();
outputs_->push_back(out_t);
@ -153,13 +153,13 @@ TEST_F(TestFcFp32, FcTest2) {
void FcTestInit3(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_,
MatMulParameter *matmal_param, float **correct) {
auto *in_t = new Tensor(kNumberTypeFloat, {1, 1, 1, 20}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto *in_t = new Tensor(kNumberTypeFloat, {1, 1, 1, 20}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
in_t->MallocData();
float in[] = {1, 0, 3, 0, 4, 5, 2, 5, 2, 5, 1, 5, 0, 1, 2, 0, 2, 1, 0, 5};
memcpy(in_t->MutableData(), in, sizeof(float) * in_t->ElementsNum());
inputs_->push_back(in_t);
auto *weight_t = new Tensor(kNumberTypeFloat, {16, 20}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto *weight_t = new Tensor(kNumberTypeFloat, {16, 20}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
weight_t->MallocData();
float weight[] = {0, 5, 5, 3, 0, 5, 3, 1, 0, 1, 3, 0, 5, 5, 2, 4, 0, 1, 1, 2, 3, 0, 5, 5, 4, 4, 1, 4, 1, 1, 5, 3,
3, 1, 0, 3, 1, 2, 4, 5, 3, 4, 4, 0, 3, 5, 0, 3, 4, 1, 0, 1, 3, 4, 0, 5, 2, 5, 0, 4, 2, 2, 2, 2,
@ -174,7 +174,7 @@ void FcTestInit3(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor
memcpy(weight_t->MutableData(), weight, sizeof(float) * weight_t->ElementsNum());
inputs_->push_back(weight_t);
auto *out_t = new Tensor(kNumberTypeFloat, {1, 16}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto *out_t = new Tensor(kNumberTypeFloat, {1, 16}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
out_t->MallocData();
outputs_->push_back(out_t);

View File

@ -18,7 +18,6 @@
#include "mindspore/lite/src/runtime/kernel/arm/fp32/l2_norm_fp32.h"
#include "src/kernel_registry.h"
#include "src/lite_kernel.h"
using mindspore::schema::Format_NHWC;
namespace mindspore {
class TestL2NormFp32 : public mindspore::CommonTest {
@ -49,7 +48,7 @@ void TestL2NormFp32::TearDown() {
void TestL2NormFp32::Init(const std::vector<int> &input_shape, const std::vector<int> &output_shape, float *input_data,
float *output_data, const int axis_num, ActType activation_type, const int thread_num) {
in_tensor_.set_data_type(kNumberTypeFloat32);
in_tensor_.set_format(Format_NHWC);
in_tensor_.set_format(mindspore::NHWC);
in_tensor_.set_shape(input_shape);
out_tensor_.set_data_type(kNumberTypeFloat32);
out_tensor_.set_shape(output_shape);

View File

@ -51,7 +51,7 @@ void InitLstmForwardCreator(std::vector<lite::Tensor *> *inputs, std::vector<lit
-0.29129684, -0.27841482, 0.01964372, -0.42543447, 0.41720617, -0.30054367};
auto *weight_i = new lite::Tensor;
weight_i->set_data_type(kNumberTypeFloat32);
weight_i->set_format(schema::Format_NHWC);
weight_i->set_format(mindspore::NHWC);
weight_i->set_shape({1, lstm_param->hidden_size_ * 4, lstm_param->input_size_});
weight_i->MallocData();
memcpy(weight_i->MutableData(), weight_i_data.data(), weight_i_data.size() * sizeof(float));
@ -64,7 +64,7 @@ void InitLstmForwardCreator(std::vector<lite::Tensor *> *inputs, std::vector<lit
0.34080023, 0.49467337, 0.23473483, 0.01759732, 0.04691631, 0.45574808, -0.29481018, 0.29442167, -0.36718};
auto *weight_h = new lite::Tensor;
weight_h->set_data_type(kNumberTypeFloat32);
weight_h->set_format(schema::Format_NHWC);
weight_h->set_format(mindspore::NHWC);
weight_h->set_shape({1, lstm_param->hidden_size_ * 4, lstm_param->hidden_size_});
weight_h->MallocData();
memcpy(weight_h->MutableData(), weight_h_data.data(), weight_h_data.size() * sizeof(float));
@ -76,7 +76,7 @@ void InitLstmForwardCreator(std::vector<lite::Tensor *> *inputs, std::vector<lit
-0.23593256, -0.3911457, 0.51128435, 0.5128727, 0.253451, -0.51891875};
auto *bias = new lite::Tensor;
bias->set_data_type(kNumberTypeFloat32);
bias->set_format(schema::Format_NHWC);
bias->set_format(mindspore::NHWC);
bias->set_shape({1, lstm_param->hidden_size_ * 4 * 2});
bias->MallocData();
memcpy(bias->MutableData(), bias_data.data(), bias_data.size() * sizeof(float));
@ -85,7 +85,7 @@ void InitLstmForwardCreator(std::vector<lite::Tensor *> *inputs, std::vector<lit
std::vector<float> state_data = {0, 0, 0};
auto *state = new lite::Tensor;
state->set_data_type(kNumberTypeFloat32);
state->set_format(schema::Format_NHWC);
state->set_format(mindspore::NHWC);
state->set_shape({1, lstm_param->batch_, lstm_param->hidden_size_});
state->MallocData();
memcpy(state->MutableData(), state_data.data(), state_data.size() * sizeof(float));
@ -101,21 +101,21 @@ void InitLstmForwardCreator(std::vector<lite::Tensor *> *inputs, std::vector<lit
auto *output = new lite::Tensor;
output->set_data_type(kNumberTypeFloat32);
output->set_shape({lstm_param->seq_len_, lstm_param->batch_, lstm_param->hidden_size_});
output->set_format(schema::Format_NHWC);
output->set_format(mindspore::NHWC);
output->MallocData();
memset(output->MutableData(), 0, output->ElementsNum() * sizeof(float));
auto *cell_state = new lite::Tensor;
cell_state->set_data_type(kNumberTypeFloat32);
cell_state->set_shape({1, lstm_param->batch_, lstm_param->hidden_size_});
cell_state->set_format(schema::Format_NHWC);
cell_state->set_format(mindspore::NHWC);
cell_state->MallocData();
memset(cell_state->MutableData(), 0, cell_state->ElementsNum() * sizeof(float));
auto *hidden_state = new lite::Tensor;
hidden_state->set_data_type(kNumberTypeFloat32);
hidden_state->set_shape({1, lstm_param->batch_, lstm_param->hidden_size_});
hidden_state->set_format(schema::Format_NHWC);
hidden_state->set_format(mindspore::NHWC);
hidden_state->MallocData();
memset(hidden_state->MutableData(), 0, hidden_state->ElementsNum() * sizeof(float));
@ -200,7 +200,7 @@ void InitLstmBackwardCreator(std::vector<lite::Tensor *> *inputs, std::vector<li
0.3653409, 0.386924, 0.3170289, -0.08830952, -0.31105759, 0.3110240, 0.15174299, 0.287579894};
auto *weight_i = new lite::Tensor;
weight_i->set_data_type(kNumberTypeFloat32);
weight_i->set_format(schema::Format_NHWC);
weight_i->set_format(mindspore::NHWC);
weight_i->set_shape({2, lstm_param->hidden_size_ * 4, lstm_param->input_size_});
weight_i->MallocData();
memcpy(weight_i->MutableData(), weight_i_data.data(), weight_i_data.size() * sizeof(float));
@ -218,7 +218,7 @@ void InitLstmBackwardCreator(std::vector<lite::Tensor *> *inputs, std::vector<li
0.35996592, -0.201961308, -0.16323345, 0.119177639, -0.12677872, -0.175229549, -0.160024613, -0.21058899};
auto *weight_h = new lite::Tensor;
weight_h->set_data_type(kNumberTypeFloat32);
weight_h->set_format(schema::Format_NHWC);
weight_h->set_format(mindspore::NHWC);
weight_h->set_shape({2, lstm_param->hidden_size_ * 4, lstm_param->hidden_size_});
weight_h->MallocData();
memcpy(weight_h->MutableData(), weight_h_data.data(), weight_h_data.size() * sizeof(float));
@ -233,7 +233,7 @@ void InitLstmBackwardCreator(std::vector<lite::Tensor *> *inputs, std::vector<li
0.23712641, -0.052937567, 0.272351622, 0.42767739, 0.303884744, -0.46025499, -0.43985402, 0.256422877};
auto *bias = new lite::Tensor;
bias->set_data_type(kNumberTypeFloat32);
bias->set_format(schema::Format_NHWC);
bias->set_format(mindspore::NHWC);
bias->set_shape({2, lstm_param->hidden_size_ * 4 * 2});
bias->MallocData();
memcpy(bias->MutableData(), bias_data.data(), bias_data.size() * sizeof(float));
@ -242,7 +242,7 @@ void InitLstmBackwardCreator(std::vector<lite::Tensor *> *inputs, std::vector<li
std::vector<float> state_data = {0, 0, 0, 0, 0, 0};
auto *state = new lite::Tensor;
state->set_data_type(kNumberTypeFloat32);
state->set_format(schema::Format_NHWC);
state->set_format(mindspore::NHWC);
state->set_shape({2, lstm_param->batch_, lstm_param->hidden_size_});
state->MallocData();
memcpy(state->MutableData(), state_data.data(), state_data.size() * sizeof(float));
@ -258,21 +258,21 @@ void InitLstmBackwardCreator(std::vector<lite::Tensor *> *inputs, std::vector<li
auto *output = new lite::Tensor;
output->set_data_type(kNumberTypeFloat32);
output->set_shape({lstm_param->seq_len_, 2, lstm_param->batch_, lstm_param->hidden_size_});
output->set_format(schema::Format_NHWC);
output->set_format(mindspore::NHWC);
output->MallocData();
memset(output->MutableData(), 0, output->ElementsNum() * sizeof(float));
auto *cell_state = new lite::Tensor;
cell_state->set_data_type(kNumberTypeFloat32);
cell_state->set_shape({2, lstm_param->batch_, lstm_param->hidden_size_});
cell_state->set_format(schema::Format_NHWC);
cell_state->set_format(mindspore::NHWC);
cell_state->MallocData();
memset(cell_state->MutableData(), 0, cell_state->ElementsNum() * sizeof(float));
auto *hidden_state = new lite::Tensor;
hidden_state->set_data_type(kNumberTypeFloat32);
hidden_state->set_shape({2, lstm_param->batch_, lstm_param->hidden_size_});
hidden_state->set_format(schema::Format_NHWC);
hidden_state->set_format(mindspore::NHWC);
hidden_state->MallocData();
memset(hidden_state->MutableData(), 0, hidden_state->ElementsNum() * sizeof(float));

View File

@ -70,18 +70,17 @@ TEST_F(TestMatMulFp32, Row2Col8Test2) {
int MMTestInit(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_, float *a_ptr, float *b_ptr,
const std::vector<int> &a_shape, const std::vector<int> &b_shape, const std::vector<int> &c_shape) {
auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
in_t->MallocData();
memcpy(in_t->MutableData(), a_ptr, sizeof(float) * in_t->ElementsNum());
inputs_->push_back(in_t);
auto weight_t =
new lite::Tensor(kNumberTypeFloat, b_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto weight_t = new lite::Tensor(kNumberTypeFloat, b_shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
weight_t->MallocData();
memcpy(weight_t->MutableData(), b_ptr, sizeof(float) * weight_t->ElementsNum());
inputs_->push_back(weight_t);
auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
out_t->MallocData();
outputs_->push_back(out_t);
@ -91,24 +90,22 @@ int MMTestInit(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *>
int MMTestInit2(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_, float *a_ptr, float *b_ptr,
float *bias_ptr, const std::vector<int> &a_shape, const std::vector<int> &b_shape,
const std::vector<int> &bias_shape, const std::vector<int> &c_shape) {
auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
in_t->MallocData();
memcpy(in_t->MutableData(), a_ptr, sizeof(float) * in_t->ElementsNum());
inputs_->push_back(in_t);
auto weight_t =
new lite::Tensor(kNumberTypeFloat, b_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto weight_t = new lite::Tensor(kNumberTypeFloat, b_shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
weight_t->MallocData();
memcpy(weight_t->MutableData(), b_ptr, sizeof(float) * weight_t->ElementsNum());
inputs_->push_back(weight_t);
auto bias_t =
new lite::Tensor(kNumberTypeFloat, bias_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto bias_t = new lite::Tensor(kNumberTypeFloat, bias_shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
bias_t->MallocData();
memcpy(bias_t->MutableData(), bias_ptr, sizeof(float) * bias_t->ElementsNum());
inputs_->push_back(bias_t);
auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
out_t->MallocData();
outputs_->push_back(out_t);

View File

@ -19,7 +19,6 @@
#include "mindspore/lite/src/runtime/kernel/arm/fp32/non_max_suppression_fp32.h"
#include "src/kernel_registry.h"
#include "src/lite_kernel.h"
using mindspore::schema::Format_NHWC;
namespace mindspore {
class TestNMSFp32 : public mindspore::CommonTest {
@ -63,12 +62,12 @@ void TestNMSFp32::Init(const std::vector<int> &box_tensor_shape, float *box_data
const std::vector<int> &score_tensor_shape, float *score_data, int32_t max_output,
float iou_threshold, float score_threshold, int center_box_point) {
box_tensor_.set_data_type(kNumberTypeFloat32);
box_tensor_.set_format(Format_NHWC);
box_tensor_.set_format(mindspore::NHWC);
box_tensor_.set_shape(box_tensor_shape);
box_tensor_.set_data(box_data);
score_tensor_.set_data_type(kNumberTypeFloat32);
score_tensor_.set_format(Format_NHWC);
score_tensor_.set_format(mindspore::NHWC);
score_tensor_.set_shape(score_tensor_shape);
score_tensor_.set_data(score_data);

View File

@ -21,7 +21,6 @@
#include "mindspore/lite/src/kernel_registry.h"
#include "schema/ops_generated.h"
using mindspore::schema::Format_NHWC;
using mindspore::schema::PaddingMode;
using mindspore::schema::PaddingMode_CONSTANT;
using mindspore::schema::PaddingMode_REFLECT;
@ -61,7 +60,7 @@ void TestPadFp32::Prepare(const std::vector<int> &input_shape, const std::vector
float *output_data, PaddingMode mode, int *paddings, int padding_length, float constant_value,
const int thread_num) {
in_tensor_.set_data_type(kNumberTypeFloat32);
in_tensor_.set_format(Format_NHWC);
in_tensor_.set_format(mindspore::NHWC);
in_tensor_.set_shape(input_shape);
out_tensor_.set_data_type(kNumberTypeFloat32);
out_tensor_.set_shape(output_shape);

View File

@ -28,18 +28,17 @@ class TestPowerFp32 : public mindspore::CommonTest {
int PowerTestInit(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_, float *a_ptr,
float *b_ptr, const std::vector<int> &a_shape, const std::vector<int> &b_shape,
const std::vector<int> &c_shape) {
auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
in_t->MallocData();
memcpy(in_t->MutableData(), a_ptr, sizeof(float) * in_t->ElementsNum());
inputs_->push_back(in_t);
auto weight_t =
new lite::Tensor(kNumberTypeFloat, b_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto weight_t = new lite::Tensor(kNumberTypeFloat, b_shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
weight_t->MallocData();
memcpy(weight_t->MutableData(), b_ptr, sizeof(float) * weight_t->ElementsNum());
inputs_->push_back(weight_t);
auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
out_t->MallocData();
outputs_->push_back(out_t);
@ -48,12 +47,12 @@ int PowerTestInit(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor
int PowerTestInit2(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_, float *a_ptr,
const std::vector<int> &a_shape, const std::vector<int> &c_shape) {
auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
in_t->MallocData();
memcpy(in_t->MutableData(), a_ptr, sizeof(float) * in_t->ElementsNum());
inputs_->push_back(in_t);
auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
out_t->MallocData();
outputs_->push_back(out_t);

View File

@ -20,7 +20,6 @@
#include "mindspore/lite/src/tensor.h"
#include "nnacl/resize_parameter.h"
#include "schema/ops_generated.h"
using mindspore::schema::Format_NHWC;
namespace mindspore {
@ -54,7 +53,7 @@ void TestResizeBilinearFp32::Prepare(const std::vector<int> &input_shape, const
float *input_data, float *output_data, const bool align_corners,
const int thread_num) {
in_tensor_.set_data_type(kNumberTypeFloat32);
in_tensor_.set_format(Format_NHWC);
in_tensor_.set_format(mindspore::NHWC);
in_tensor_.set_shape(input_shape);
out_tensor_.set_data_type(kNumberTypeFloat32);
out_tensor_.set_shape(output_shape);

View File

@ -28,17 +28,17 @@ class TestROIPoolingFp32 : public mindspore::CommonTest {
int ROIPoolingTestInit(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_, float *a_ptr,
float *b_ptr, const std::vector<int> &a_shape, const std::vector<int> &b_shape,
const std::vector<int> &c_shape) {
auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
in_t->MallocData();
memcpy(in_t->MutableData(), a_ptr, sizeof(float) * in_t->ElementsNum());
inputs_->push_back(in_t);
auto roi_t = new lite::Tensor(kNumberTypeFloat, b_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto roi_t = new lite::Tensor(kNumberTypeFloat, b_shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
roi_t->MallocData();
memcpy(roi_t->MutableData(), b_ptr, sizeof(float) * roi_t->ElementsNum());
inputs_->push_back(roi_t);
auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto out_t = new lite::Tensor(kNumberTypeFloat, c_shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
out_t->MallocData();
outputs_->push_back(out_t);

View File

@ -26,7 +26,6 @@ using mindspore::schema::ActivationType;
using mindspore::schema::ActivationType_NO_ACTIVATION;
using mindspore::schema::ActivationType_RELU;
using mindspore::schema::ActivationType_RELU6;
using mindspore::schema::Format_NHWC;
namespace mindspore {
class TestScaleFp32 : public mindspore::CommonTest {
@ -66,13 +65,13 @@ void TestScaleFp32::Prepare(const std::vector<int> &input_shape, const std::vect
float *input_data, float *scale_data, float *offset_data, float *output_data, int axis,
ActivationType act_type, const int thread_num) {
in_tensor_.set_data_type(kNumberTypeFloat32);
in_tensor_.set_format(Format_NHWC);
in_tensor_.set_format(mindspore::NHWC);
in_tensor_.set_shape(input_shape);
scale_tensor_.set_data_type(kNumberTypeFloat32);
scale_tensor_.set_format(Format_NHWC);
scale_tensor_.set_format(mindspore::NHWC);
scale_tensor_.set_shape(scale_shape);
offset_tensor_.set_data_type(kNumberTypeFloat32);
offset_tensor_.set_format(Format_NHWC);
offset_tensor_.set_format(mindspore::NHWC);
offset_tensor_.set_shape(offset_shape);
out_tensor_.set_data_type(kNumberTypeFloat32);
out_tensor_.set_shape(output_shape);

View File

@ -33,14 +33,14 @@ class TestSkipGramFp32 : public mindspore::CommonTest {
void SkipGramTestInit(std::vector<Tensor *> *inputs_, std::vector<Tensor *> *outputs_,
SkipGramParameter *skip_gram_param) {
Tensor *in_t_first = new Tensor(kObjectTypeString, {}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
Tensor *in_t_first = new Tensor(kObjectTypeString, {}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
char sentence[] = "The quick brown fox jumps over the lazy dog";
std::vector<StringPack> str;
str.push_back({43, sentence});
mindspore::lite::WriteStringsToTensor(in_t_first, str);
inputs_->push_back(in_t_first);
Tensor *output = new Tensor(kObjectTypeString, {}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
Tensor *output = new Tensor(kObjectTypeString, {}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
outputs_->push_back(output);
skip_gram_param->ngram_size = 3;

View File

@ -54,7 +54,7 @@ TEST_F(SpaceToDepthTestFp32, SpaceToDepthTest2) {
lite::Tensor input_tensor;
input_tensor.set_data(input.data());
input_tensor.set_shape(in_shape);
input_tensor.set_format(schema::Format_NHWC);
input_tensor.set_format(mindspore::NHWC);
input_tensor.set_data_type(kNumberTypeFloat32);
std::vector<lite::Tensor *> inputs_tensor;
inputs_tensor.push_back(&input_tensor);
@ -66,7 +66,7 @@ TEST_F(SpaceToDepthTestFp32, SpaceToDepthTest2) {
lite::Tensor output_tensor;
output_tensor.set_data(output.data());
output_tensor.set_shape(out_shape);
output_tensor.set_format(schema::Format_NHWC);
output_tensor.set_format(mindspore::NHWC);
output_tensor.set_data_type(kNumberTypeFloat32);
std::vector<lite::Tensor *> outputs_tensor;
outputs_tensor.push_back(&output_tensor);

View File

@ -220,7 +220,7 @@ TEST_F(TestTransposeFp32, TransposeFp32_test5) { /* 1x2x3x2x2 */
lite::Tensor input_tensor;
input_tensor.set_data(input.data());
input_tensor.set_shape(input_shape);
input_tensor.set_format(schema::Format_NHWC);
input_tensor.set_format(mindspore::NHWC);
input_tensor.set_data_type(kNumberTypeFloat32);
lite::Tensor perm_tensor(kNumberTypeInt32, {5});
perm_tensor.set_data(perm);
@ -228,7 +228,7 @@ TEST_F(TestTransposeFp32, TransposeFp32_test5) { /* 1x2x3x2x2 */
lite::Tensor output_tensor;
output_tensor.set_data(output.data());
output_tensor.set_shape(output_shape);
output_tensor.set_format(schema::Format_NHWC);
output_tensor.set_format(mindspore::NHWC);
output_tensor.set_data_type(kNumberTypeFloat32);
std::vector<lite::Tensor *> outputs_tensor;
outputs_tensor.emplace_back(&output_tensor);

View File

@ -71,7 +71,7 @@ TEST_F(TestConv1x1Int8, Input1x1PrePack2) {
int Conv1x1Int8TestInit1_perchannel(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_,
ConvParameter *conv_param, int8_t **correct) {
Tensor *in_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 4}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
Tensor *in_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 4}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
auto in_quant_arg = new mindspore::lite::QuantArg();
in_quant_arg->zeroPoint = -42, in_quant_arg->scale = 0.117647;
in_t->AddQuantParam(*in_quant_arg);
@ -81,8 +81,7 @@ int Conv1x1Int8TestInit1_perchannel(std::vector<lite::Tensor *> *inputs_, std::v
memcpy(in_t->MutableData(), in, in_t->ElementsNum() * sizeof(int8_t));
inputs_->push_back(in_t);
Tensor *weight_t =
new Tensor(kNumberTypeInt8, {3, 1, 1, 4}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
Tensor *weight_t = new Tensor(kNumberTypeInt8, {3, 1, 1, 4}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
weight_t->MallocData();
auto weight_quant_arg1 = new mindspore::lite::QuantArg();
weight_quant_arg1->zeroPoint = 66, weight_quant_arg1->scale = 0.96439215686275;
@ -97,7 +96,7 @@ int Conv1x1Int8TestInit1_perchannel(std::vector<lite::Tensor *> *inputs_, std::v
memcpy(weight_t->MutableData(), weight, weight_t->ElementsNum() * sizeof(int8_t));
inputs_->push_back(weight_t);
Tensor *out_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 3}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
Tensor *out_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 3}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
out_t->MallocData();
auto output_quant_arg = new mindspore::lite::QuantArg();
output_quant_arg->zeroPoint = 7, output_quant_arg->scale = 0.294321233;
@ -141,7 +140,7 @@ TEST_F(TestConv1x1Int8, Conv1x1TestPerChannel) {
int Conv1x1Int8TestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_,
ConvParameter *conv_param, int8_t **correct) {
Tensor *in_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 4}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
Tensor *in_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 4}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
auto in_quant_arg = new mindspore::lite::QuantArg();
in_quant_arg->zeroPoint = -42, in_quant_arg->scale = 0.117647;
in_t->AddQuantParam(*in_quant_arg);
@ -153,8 +152,7 @@ int Conv1x1Int8TestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite:
reinterpret_cast<int8_t *>(in_t->MutableData()));
inputs_->push_back(in_t);
Tensor *weight_t =
new Tensor(kNumberTypeInt8, {3, 1, 1, 4}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
Tensor *weight_t = new Tensor(kNumberTypeInt8, {3, 1, 1, 4}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
auto weight_quant_arg = new mindspore::lite::QuantArg();
weight_quant_arg->zeroPoint = 66, weight_quant_arg->scale = 0.036439215686275;
weight_t->AddQuantParam(*weight_quant_arg);
@ -165,7 +163,7 @@ int Conv1x1Int8TestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite:
reinterpret_cast<int8_t *>(weight_t->MutableData()));
inputs_->push_back(weight_t);
Tensor *out_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 3}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
Tensor *out_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 3}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
out_t->MallocData();
auto output_quant_arg = new mindspore::lite::QuantArg();
output_quant_arg->zeroPoint = 7, output_quant_arg->scale = 0.234321233;
@ -212,7 +210,7 @@ TEST_F(TestConv1x1Int8, Conv1x1Int8Test1) {
int Conv1x1Int8TestInit2(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_,
ConvParameter *conv_param, int8_t **correct) {
size_t buffer_size;
Tensor *in_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 4}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
Tensor *in_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 4}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
auto in_quant_arg = new mindspore::lite::QuantArg();
in_quant_arg->zeroPoint = -42, in_quant_arg->scale = 0.117647;
in_t->AddQuantParam(*in_quant_arg);
@ -223,8 +221,7 @@ int Conv1x1Int8TestInit2(std::vector<lite::Tensor *> *inputs_, std::vector<lite:
inputs_->push_back(in_t);
delete[] input;
Tensor *weight_t =
new Tensor(kNumberTypeInt8, {3, 1, 1, 4}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
Tensor *weight_t = new Tensor(kNumberTypeInt8, {3, 1, 1, 4}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
auto weight_quant_arg = new mindspore::lite::QuantArg();
weight_quant_arg->zeroPoint = 66, weight_quant_arg->scale = 0.036439215686275;
weight_t->AddQuantParam(*weight_quant_arg);
@ -235,7 +232,7 @@ int Conv1x1Int8TestInit2(std::vector<lite::Tensor *> *inputs_, std::vector<lite:
inputs_->push_back(weight_t);
delete[] weight;
Tensor *bias_t = new Tensor(kNumberTypeInt32, {4}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
Tensor *bias_t = new Tensor(kNumberTypeInt32, {4}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
weight_t->MallocData();
std::string bias_path = "./bias";
auto bias = mindspore::lite::ReadFile(bias_path.c_str(), &buffer_size);
@ -243,7 +240,7 @@ int Conv1x1Int8TestInit2(std::vector<lite::Tensor *> *inputs_, std::vector<lite:
inputs_->push_back(bias_t);
delete[] bias;
Tensor *out_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 3}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
Tensor *out_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 3}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
out_t->MallocData();
auto output_quant_arg = new mindspore::lite::QuantArg();
output_quant_arg->zeroPoint = 7, output_quant_arg->scale = 0.234321233;

View File

@ -30,7 +30,6 @@ using mindspore::lite::DeviceType;
namespace mindspore {
using mindspore::lite::QuantArg;
using mindspore::lite::Tensor;
using mindspore::schema::Format_NHWC;
class TestDeconvInt8 : public mindspore::CommonTest {
public:
TestDeconvInt8() {}
@ -274,7 +273,7 @@ int DeConvInt8TestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite::
ConvParameter *conv_param, int8_t **correct) {
/* float data from deconv fp32 testcase : DeConvTestInit2 */
/* vq = (vi - zp) * s vi = vq / s + zp */
auto *in_t = new Tensor(kNumberTypeInt8, {1, 4, 2, 3}, Format_NHWC, lite::Tensor::Category::VAR);
auto *in_t = new Tensor(kNumberTypeInt8, {1, 4, 2, 3}, mindspore::NHWC, lite::Tensor::Category::VAR);
in_t->MallocData();
int8_t in[] = {6, 43, 38, 24, -8, 12, 41, -24, -20, 41, -19, -6, -26, -6, 23, -31, 34, 45, 8, 45, -39, -27, -48, 12};
memcpy(in_t->MutableData(), in, sizeof(int8_t) * in_t->ElementsNum());
@ -283,7 +282,7 @@ int DeConvInt8TestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite::
in_t->AddQuantParam(*in_quant_arg);
inputs_->push_back(in_t);
auto *weight_t = new Tensor(kNumberTypeInt8, {3, 3, 3, 2}, Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto *weight_t = new Tensor(kNumberTypeInt8, {3, 3, 3, 2}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
weight_t->MallocData();
int8_t weight[] = {66, 89, 98, 74, 95, 86, 125, 95, 105, 83, 116, 94, 90, 80, 86, 59, 72, 92,
64, 76, 92, 80, 90, 87, 106, 55, 105, 60, 75, 53, 81, 81, 98, 81, 86, 59,
@ -294,7 +293,7 @@ int DeConvInt8TestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite::
weight_t->AddQuantParam(*w_quant_arg);
inputs_->push_back(weight_t);
auto *out_t = new Tensor(kNumberTypeInt8, {1, 7, 3, 2}, Format_NHWC, lite::Tensor::Category::VAR);
auto *out_t = new Tensor(kNumberTypeInt8, {1, 7, 3, 2}, mindspore::NHWC, lite::Tensor::Category::VAR);
out_t->MallocData();
auto *out_quant_arg = new QuantArg();
out_quant_arg->zeroPoint = 31, out_quant_arg->scale = 0.3439215686275;

View File

@ -42,7 +42,7 @@ extern void QuantProcess(float *input, int len, float min, float max, float *sca
extern lite::Tensor *MakeQuantTensor(int8_t *data, int len, std::vector<int> *shape, float scale, int zp);
lite::Tensor *MakeIntTensor(int *data, int len, std::vector<int> *shape) {
auto tensor = new lite::Tensor(kNumberTypeInt32, *shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto tensor = new lite::Tensor(kNumberTypeInt32, *shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
tensor->MallocData();
auto tensor_ptr = reinterpret_cast<int *>(tensor->MutableData());
memcpy(tensor_ptr, data, len * sizeof(int));

View File

@ -47,7 +47,7 @@ void QuantProcess(float *input, int len, float min, float max, float *scale, int
}
lite::Tensor *MakeQuantTensor(int8_t *data, int len, std::vector<int> *shape, float scale, int zp) {
auto tensor = new lite::Tensor(kNumberTypeInt8, *shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto tensor = new lite::Tensor(kNumberTypeInt8, *shape, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
tensor->MallocData();
if (data) {
auto tensor_ptr = reinterpret_cast<int8_t *>(tensor->MutableData());

View File

@ -33,7 +33,7 @@ class TestPadInt8 : public mindspore::CommonTest {
int PadInt8TestInit1(std::vector<Tensor *> *inputs_, std::vector<Tensor *> *outputs_, PadParameter *pad_param,
int8_t **correct) {
Tensor *in_t = new Tensor(kNumberTypeInt8, {3}, schema::Format_NHWC, lite::Tensor::CONST_TENSOR);
Tensor *in_t = new Tensor(kNumberTypeInt8, {3}, mindspore::NHWC, lite::Tensor::CONST_TENSOR);
in_t->MallocData();
int8_t in[] = {1, 1, 1};
memcpy(in_t->MutableData(), in, sizeof(int8_t) * in_t->ElementsNum());
@ -42,7 +42,7 @@ int PadInt8TestInit1(std::vector<Tensor *> *inputs_, std::vector<Tensor *> *outp
in_t->AddQuantParam(*in_quant_arg);
inputs_->push_back(in_t);
Tensor *out_t = new Tensor(kNumberTypeInt8, {7}, schema::Format_NHWC, lite::Tensor::CONST_TENSOR);
Tensor *out_t = new Tensor(kNumberTypeInt8, {7}, mindspore::NHWC, lite::Tensor::CONST_TENSOR);
out_t->MallocData();
QuantArg *out_quant_arg = new QuantArg();
out_quant_arg->zeroPoint = 10, out_quant_arg->scale = 0.31228156;
@ -84,7 +84,7 @@ TEST_F(TestPadInt8, PadInt8Test1) {
int PadInt8TestInit2(std::vector<Tensor *> *inputs_, std::vector<Tensor *> *outputs_, PadParameter *pad_param,
int8_t **correct) {
Tensor *in_t = new Tensor(kNumberTypeInt8, {6, 2}, schema::Format_NHWC, lite::Tensor::VAR);
Tensor *in_t = new Tensor(kNumberTypeInt8, {6, 2}, mindspore::NHWC, lite::Tensor::VAR);
in_t->MallocData();
int8_t in[] = {18, 71, 99, -6, 5, -119, 86, 13, 15, -85, -41, -77};
memcpy(in_t->MutableData(), in, sizeof(int8_t) * in_t->ElementsNum());
@ -93,7 +93,7 @@ int PadInt8TestInit2(std::vector<Tensor *> *inputs_, std::vector<Tensor *> *outp
in_t->AddQuantParam(*in_quant_arg);
inputs_->push_back(in_t);
Tensor *out_t = new Tensor(kNumberTypeInt8, {10, 5}, schema::Format_NHWC, lite::Tensor::VAR);
Tensor *out_t = new Tensor(kNumberTypeInt8, {10, 5}, mindspore::NHWC, lite::Tensor::VAR);
out_t->MallocData();
QuantArg *out_quant_arg = new QuantArg();
out_quant_arg->zeroPoint = 10, out_quant_arg->scale = 0.31228156;
@ -137,7 +137,7 @@ TEST_F(TestPadInt8, PadInt8Test2) {
int PadInt8TestInit4(std::vector<Tensor *> *inputs_, std::vector<Tensor *> *outputs_, PadParameter *pad_param,
int8_t **correct) {
Tensor *in_t = new Tensor(kNumberTypeInt8, {2, 3, 2, 1}, schema::Format_NHWC, lite::Tensor::VAR);
Tensor *in_t = new Tensor(kNumberTypeInt8, {2, 3, 2, 1}, mindspore::NHWC, lite::Tensor::VAR);
in_t->MallocData();
int8_t in[] = {73, 24, 7, -31, -109, -2, 69, -64, 51, -45, 38, 53};
memcpy(in_t->MutableData(), in, sizeof(int8_t) * in_t->ElementsNum());
@ -146,7 +146,7 @@ int PadInt8TestInit4(std::vector<Tensor *> *inputs_, std::vector<Tensor *> *outp
in_t->AddQuantParam(*in_quant_arg);
inputs_->push_back(in_t);
Tensor *out_t = new Tensor(kNumberTypeInt8, {6, 6, 4, 3}, schema::Format_NHWC, lite::Tensor::VAR);
Tensor *out_t = new Tensor(kNumberTypeInt8, {6, 6, 4, 3}, mindspore::NHWC, lite::Tensor::VAR);
out_t->MallocData();
QuantArg *out_quant_arg = new QuantArg();
out_quant_arg->zeroPoint = 10, out_quant_arg->scale = 0.31228156;

View File

@ -43,7 +43,7 @@ TEST_F(QuantDTypeCastTestFp32, QuantDTypeCastTest1) {
input_tensor.set_data(input.data());
input_tensor.set_shape(in_shape);
input_tensor.set_data_type(kNumberTypeInt8);
input_tensor.set_format(schema::Format_NHWC);
input_tensor.set_format(mindspore::NHWC);
input_tensor.AddQuantParam(quant_arg);
std::vector<lite::Tensor *> inputs_tensor;
@ -103,7 +103,7 @@ TEST_F(QuantDTypeCastTestFp32, QuantDTypeCastTest2) {
lite::Tensor output_tensor;
output_tensor.set_data(output.data());
output_tensor.set_shape(out_shape);
output_tensor.set_format(schema::Format_NHWC);
output_tensor.set_format(mindspore::NHWC);
output_tensor.set_data_type(kNumberTypeInt8);
std::vector<lite::Tensor *> outputs_tensor;
outputs_tensor.emplace_back(&output_tensor);

View File

@ -47,7 +47,7 @@ class TestNormalize : public mindspore::CommonTest {
void TestNormalize::NormalizeTestInit() {
input_tensor_.set_data_type(kObjectTypeString);
input_tensor_.set_format(schema::Format_NHWC);
input_tensor_.set_format(mindspore::NHWC);
std::vector<StringPack> str_pack;
const char sentence1[] = " I don't know what happened\n";
@ -57,7 +57,7 @@ void TestNormalize::NormalizeTestInit() {
mindspore::lite::WriteStringsToTensor(&input_tensor_, str_pack);
output_tensor_.set_data_type(kObjectTypeString);
output_tensor_.set_format(schema::Format_NHWC);
output_tensor_.set_format(mindspore::NHWC);
}
TEST_F(TestNormalize, TestSentence) {

View File

@ -63,8 +63,8 @@ TEST_F(TestCastSelfOpenCL, Castfp32tofp16) {
MS_LOG(INFO) << " init tensors ";
std::vector<int> shape = {1, 23, 39, 47};
auto tensor_type = lite::Tensor::CONST_TENSOR;
auto *input_tensor = new (std::nothrow) lite::Tensor(kNumberTypeFloat32, shape, schema::Format_NHWC, tensor_type);
auto *output_tensor = new (std::nothrow) lite::Tensor(kNumberTypeFloat16, shape, schema::Format_NHWC, tensor_type);
auto *input_tensor = new (std::nothrow) lite::Tensor(kNumberTypeFloat32, shape, mindspore::NHWC, tensor_type);
auto *output_tensor = new (std::nothrow) lite::Tensor(kNumberTypeFloat16, shape, mindspore::NHWC, tensor_type);
if (input_tensor == nullptr || output_tensor == nullptr) {
MS_LOG(INFO) << " new input_tensor or output_tensor failed ";
return;
@ -175,8 +175,8 @@ TEST_F(TestCastSelfOpenCL, Castfp16tofp32) {
MS_LOG(INFO) << " init tensors ";
std::vector<int> shape = {1, 23, 39, 47};
auto tensor_type = lite::Tensor::CONST_TENSOR;
auto *input_tensor = new (std::nothrow) lite::Tensor(kNumberTypeFloat16, shape, schema::Format_NHWC, tensor_type);
auto *output_tensor = new (std::nothrow) lite::Tensor(kNumberTypeFloat32, shape, schema::Format_NHWC, tensor_type);
auto *input_tensor = new (std::nothrow) lite::Tensor(kNumberTypeFloat16, shape, mindspore::NHWC, tensor_type);
auto *output_tensor = new (std::nothrow) lite::Tensor(kNumberTypeFloat32, shape, mindspore::NHWC, tensor_type);
if (input_tensor == nullptr || output_tensor == nullptr) {
MS_LOG(INFO) << " new input_tensor or output_tensor failed ";
return;

View File

@ -24,7 +24,6 @@
using mindspore::kernel::LiteKernel;
using mindspore::kernel::OpenCLSubGraph;
using mindspore::lite::KernelRegistry;
using mindspore::schema::Format::Format_NHWC;
namespace mindspore::lite::opencl::test {
// muti-output
@ -67,11 +66,11 @@ void TestMain(const std::vector<ArgsTupleWithDtype> &input_infos, const std::vec
auto &shape = std::get<0>(input_info);
auto category = std::get<2>(input_info);
auto data_type = std::get<3>(input_info);
in_tensors.emplace_back(std::make_shared<Tensor>(data_type, shape, Format_NHWC, category));
in_tensors.emplace_back(std::make_shared<Tensor>(data_type, shape, mindspore::NHWC, category));
}
for (auto outout_info : output_info) {
const std::vector<int> &output_shape = std::get<0>(outout_info);
out_tensors.emplace_back(std::make_shared<Tensor>(std::get<2>(outout_info), output_shape, Format_NHWC, VAR));
out_tensors.emplace_back(std::make_shared<Tensor>(std::get<2>(outout_info), output_shape, mindspore::NHWC, VAR));
}
// secondly, init weight Tensor's data
std::vector<Tensor *> kernel_inputs;
@ -231,7 +230,7 @@ void TestMain(const std::vector<ArgsTupleWithDtype> &input_infos, std::tuple<std
auto &shape = std::get<0>(input_info);
auto category = std::get<2>(input_info);
auto data_type = std::get<3>(input_info);
tensors.emplace_back(std::make_shared<Tensor>(data_type, shape, Format_NHWC, category));
tensors.emplace_back(std::make_shared<Tensor>(data_type, shape, mindspore::NHWC, category));
}
// secondly, init weight Tensor's data
std::vector<Tensor *> kernel_inputs;
@ -257,7 +256,7 @@ void TestMain(const std::vector<ArgsTupleWithDtype> &input_infos, std::tuple<std
const std::vector<int> &output_shape = std::get<0>(output_info);
float *expect_data = std::get<1>(output_info);
auto output = Tensor(kNumberTypeFloat32, output_shape, Format_NHWC, VAR);
auto output = Tensor(kNumberTypeFloat32, output_shape, mindspore::NHWC, VAR);
// simulating benchmark: session_->CompileGraph() -> scheduler.Schedule() -> BuildKernels()
MS_LOG(DEBUG) << "create OpenCLKernel";

View File

@ -22,7 +22,6 @@
using mindspore::lite::Tensor;
using mindspore::schema::PrimitiveType_Fill;
using mindspore::schema::PrimitiveType_Shape;
using mindspore::schema::Format::Format_NHWC;
// PrimitiveType_Fill: src/ops/populate/fill_populate.cc
@ -45,8 +44,8 @@ TEST_F(TestFillOpenCLCI, Fp32testfill) {
float correctOutput[] = {9, 9, 9, 9, 9, 9, 9, 9, 9};
auto data_type = kNumberTypeFloat32;
std::vector<int> output_shape = {3, 3};
auto in_tensor1 = Tensor(data_type, input_shape1, Format_NHWC, lite::Tensor::VAR);
auto output_tensor = Tensor(data_type, output_shape, Format_NHWC, lite::Tensor::VAR);
auto in_tensor1 = Tensor(data_type, input_shape1, mindspore::NHWC, lite::Tensor::VAR);
auto output_tensor = Tensor(data_type, output_shape, mindspore::NHWC, lite::Tensor::VAR);
std::vector<lite::Tensor *> inputs{&in_tensor1};
std::vector<lite::Tensor *> outputs{&output_tensor};
@ -116,8 +115,8 @@ TEST_F(TestFillOpenCLCI, Fp32testshape) {
float correctOutput[] = {2, 4};
auto data_type = kNumberTypeFloat32;
std::vector<int> output_shape = {2};
auto in_tensor1 = Tensor(data_type, input_shape1, Format_NHWC, lite::Tensor::VAR);
auto output_tensor = Tensor(data_type, output_shape, Format_NHWC, lite::Tensor::VAR);
auto in_tensor1 = Tensor(data_type, input_shape1, mindspore::NHWC, lite::Tensor::VAR);
auto output_tensor = Tensor(data_type, output_shape, mindspore::NHWC, lite::Tensor::VAR);
std::vector<lite::Tensor *> inputs{&in_tensor1};
std::vector<lite::Tensor *> outputs{&output_tensor};

View File

@ -932,7 +932,7 @@ std::string GenerateOutputFileName(tensor::MSTensor *tensor, const std::string &
if (TYPE_ID_MAP.find(tensor->data_type()) != TYPE_ID_MAP.end()) {
file_name += TYPE_ID_MAP.at(tensor->data_type());
}
auto tensor_format = static_cast<lite::Tensor *>(tensor)->format();
auto tensor_format = static_cast<schema::Format>(static_cast<lite::Tensor *>(tensor)->format());
if (TENSOR_FORMAT_MAP.find(tensor_format) != TENSOR_FORMAT_MAP.end()) {
file_name += "_" + TENSOR_FORMAT_MAP.at(tensor_format) + ".bin";
}

View File

@ -89,7 +89,7 @@ void ConvertString(MetaGraphT *graph, uint32_t index, bool *convert_succ, std::v
auto &tensorT = graph->allTensors.at(index);
auto tensor_shape = tensorT->dims;
lite_tensor = std::make_unique<Tensor>(
TypeId(tensorT->dataType), tensor_shape, tensorT->format,
TypeId(tensorT->dataType), tensor_shape, static_cast<mindspore::Format>(tensorT->format),
TensorCategory(tensorT->nodeType, tensorT->dims.size(), TypeId(tensorT->dataType), tensorT->data.size()));
if (lite_tensor == nullptr) {
MS_LOG(ERROR) << "lite tensor is nullptr";
@ -117,7 +117,7 @@ void ConvertOtherTensor(MetaGraphT *graph, uint32_t index, bool *convert_succ, s
auto &tensorT = graph->allTensors.at(index);
auto tensor_shape = tensorT->dims;
lite_tensor = std::make_unique<Tensor>(
TypeId(tensorT->dataType), tensor_shape, tensorT->format,
TypeId(tensorT->dataType), tensor_shape, static_cast<mindspore::Format>(tensorT->format),
TensorCategory(tensorT->nodeType, tensorT->dims.size(), TypeId(tensorT->dataType), tensorT->data.size()));
if (lite_tensor == nullptr) {
MS_LOG(ERROR) << "lite tensor is nullptr";
@ -227,7 +227,7 @@ void SetDataType(MetaGraphT *graph, const std::vector<Tensor *> &output_tensors,
uint32_t i, uint32_t infer_node_index) {
auto &node = graph->nodes.at(infer_node_index);
auto &output_tensor = graph->allTensors.at(node->outputIndex[i]);
output_tensor->format = output_tensors[i]->format();
output_tensor->format = static_cast<schema::Format>(output_tensors[i]->format());
output_tensor->dataType = output_tensors[i]->data_type();
if (output_tensors[i]->data_type() == kObjectTypeTensorType) {
auto tensor_list = reinterpret_cast<TensorList *>(output_tensors[i]);

View File

@ -85,7 +85,7 @@ std::vector<Tensor *> GetCNodeInputTensors(const CNodePtr &cnode, lite::converte
return {};
}
auto tensor = new (std::nothrow)
Tensor(TypeId(data_info.data_type_), data_info.shape_, schema::Format(data_info.format_),
Tensor(TypeId(data_info.data_type_), data_info.shape_, static_cast<mindspore::Format>(data_info.format_),
lite::TensorCategory(0, data_info.shape_.size(), TypeId(data_info.data_type_), data_info.data_.size()));
if (tensor == nullptr) {
MS_LOG(ERROR) << "new a tensor is nullptr.";

View File

@ -201,9 +201,9 @@ STATUS InferShapePass::GetCNodeInputTensors(const CNodePtr &cnode, std::vector<l
tensor->set_shape(shape);
tensor->set_data_type(tensor_info->data_type());
if (primitive->GetAttr(ops::kFormat) != nullptr && i == WEIGHT_INDEX) {
tensor->set_format(static_cast<schema::Format>(GetValue<int64_t>(primitive->GetAttr(ops::kFormat))));
tensor->set_format(static_cast<mindspore::Format>(GetValue<int64_t>(primitive->GetAttr(ops::kFormat))));
} else {
tensor->set_format(schema::Format::Format_NHWC);
tensor->set_format(mindspore::NHWC);
}
}

View File

@ -51,7 +51,7 @@ void SetConvWeightFormat(const CNodePtr &cnode, const std::vector<lite::Tensor *
auto prim = GetValueNode<PrimitivePtr>(cnode->input(0));
MS_ASSERT(prim != nullptr);
if (prim->GetAttr(ops::kFormat) != nullptr && inputs.size() > 1) {
inputs[1]->set_format(static_cast<schema::Format>(GetValue<int64_t>(prim->GetAttr(ops::kFormat))));
inputs[1]->set_format(static_cast<mindspore::Format>(GetValue<int64_t>(prim->GetAttr(ops::kFormat))));
}
}
@ -63,7 +63,7 @@ void RectifyFormat(const CNodePtr &cnode, const std::vector<lite::Tensor *> &inp
for (auto &input : inputs) {
auto shape = input->shape();
if (shape.size() == 4 && shape[3] == 3 && shape[1] == -1) {
input->set_format(schema::Format_NHWC);
input->set_format(mindspore::NHWC);
}
}
}
@ -431,7 +431,7 @@ STATUS NodeInferShape::ConvertToLiteTensor(const std::vector<lite::DataInfo> &da
lite::Tensor *tensor = nullptr;
if (data_info.data_type_ != kObjectTypeTensorType) {
tensor = new (std::nothrow) lite::Tensor(TypeId(data_info.data_type_), data_info.shape_,
(schema::Format)data_info.format_, tensor_category);
(mindspore::Format)data_info.format_, tensor_category);
} else {
tensor = new (std::nothrow) lite::TensorList(data_info.shape_, std::vector<int>(), tensor_category);
}