From ab0f0dff00f3e05c0d215a13283e343956ae918f Mon Sep 17 00:00:00 2001
From: wsc
Date: Mon, 21 Sep 2020 15:23:21 +0800
Subject: [PATCH] Fix bugs in resizeNearestNeighbor and expandDims operators.

---
 mindspore/lite/nnacl/fp32/expandDims.c        |  2 +-
 mindspore/lite/nnacl/fp32/expandDims.h        |  2 +-
 mindspore/lite/src/ops/resize.cc              | 48 ++---------------
 .../runtime/kernel/arm/base/resize_base.cc    | 53 ++++++++++++++-----
 .../src/runtime/kernel/arm/base/resize_base.h |  1 +
 .../src/runtime/kernel/arm/fp32/expandDims.cc | 23 +++++---
 .../src/runtime/kernel/arm/fp32/expandDims.h  |  4 +-
 .../lite/src/runtime/kernel/arm/fp32/pad.cc   |  6 +--
 .../src/runtime/kernel/arm/fp32/resize.cc     | 11 ++++
 .../lite/src/runtime/kernel/arm/fp32/shape.cc |  1 +
 .../runtime/kernel/arm/int8/squeeze_int8.h    |  4 +-
 11 files changed, 83 insertions(+), 72 deletions(-)

diff --git a/mindspore/lite/nnacl/fp32/expandDims.c b/mindspore/lite/nnacl/fp32/expandDims.c
index 43f5233b09..660b8b8aa9 100644
--- a/mindspore/lite/nnacl/fp32/expandDims.c
+++ b/mindspore/lite/nnacl/fp32/expandDims.c
@@ -18,7 +18,7 @@
 #include <string.h>
 #include "nnacl/errorcode.h"
 
-int ExpandDims(float *input_ptr, float *output_ptr, size_t data_size) {
+int ExpandDims(void *input_ptr, void *output_ptr, size_t data_size) {
   memcpy(output_ptr, input_ptr, data_size);
   return NNACL_OK;
 }
diff --git a/mindspore/lite/nnacl/fp32/expandDims.h b/mindspore/lite/nnacl/fp32/expandDims.h
index 1df637c947..e106e1189d 100644
--- a/mindspore/lite/nnacl/fp32/expandDims.h
+++ b/mindspore/lite/nnacl/fp32/expandDims.h
@@ -27,7 +27,7 @@ typedef struct ExpandDimsParameter {
 #ifdef __cplusplus
 extern "C" {
 #endif
-int ExpandDims(float *input_ptr, float *output_ptr, size_t data_size);
+int ExpandDims(void *input_ptr, void *output_ptr, size_t data_size);
 #ifdef __cplusplus
 }
 #endif
diff --git a/mindspore/lite/src/ops/resize.cc b/mindspore/lite/src/ops/resize.cc
index ac06eec07e..3a8d79d2f9 100644
--- a/mindspore/lite/src/ops/resize.cc
+++ b/mindspore/lite/src/ops/resize.cc
@@ -97,31 +97,13 @@ int Resize::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers:
 namespace {
 constexpr int kInputRank = 4;
 }  // namespace
-template <typename T>
-void CalShape(const T *data, const std::vector<lite::Tensor *> &inputs, std::vector<int> *out_shape, int shape_size) {
-  int input_count = inputs[0]->ElementsNum();
-  int index = 0;
-  int size = 1;
-  for (int i = 0; i < shape_size; i++) {
-    if (static_cast<int>(data[i]) == -1) {
-      index = i;
-    } else {
-      size *= data[i];
-    }
-    out_shape->push_back(data[i]);
-  }
-  if (static_cast<int>(data[index]) == -1) {
-    (*out_shape)[index] = input_count / size;
-  }
-}
-
 int Resize::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) {
   MS_ASSERT(this->primitive_ != nullptr);
   auto input = inputs_.front();
   if (input == nullptr) {
     return RET_ERROR;
   }
-  if (input->shape().size() != kInputRank) {
+  if (!input->shape().empty() && input->shape().size() != kInputRank) {
     MS_LOG(ERROR) << "Size of input shape is wrong.";
     return RET_ERROR;
   }
@@ -145,31 +127,9 @@ int Resize::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Te
     auto shape_size = shape_tensor->ElementsNum();
-    switch (shape_tensor->data_type()) {
-      case kNumberTypeInt8: {
-        auto data = reinterpret_cast<int8_t *>(shape_tensor->MutableData());
-        CalShape(data, inputs_, &output_shape, shape_size);
-      } break;
-      case kNumberTypeInt32: {
-        auto data = reinterpret_cast<int32_t *>(shape_tensor->MutableData());
-        CalShape(data, inputs_, &output_shape, shape_size);
-      } break;
-      case kNumberTypeInt64: {
-        auto data = reinterpret_cast<int64_t *>(shape_tensor->MutableData());
-        CalShape(data, inputs_, &output_shape, shape_size);
-      } break;
-      case kNumberTypeFloat: {
-        auto data = reinterpret_cast<float *>(shape_tensor->MutableData());
-        CalShape(data, inputs_, &output_shape, shape_size);
-      } break;
-      case kNumberTypeUInt32: {
-        auto data = reinterpret_cast<uint32_t *>(shape_tensor->MutableData());
-        CalShape(data, inputs_, &output_shape, shape_size);
-      } break;
-      default: {
-        MS_LOG(ERROR) << "Reshape weight tensor has unsupported dataType: " << shape_tensor->data_type();
-        return RET_INFER_ERR;
-      }
+    auto data = reinterpret_cast<int32_t *>(shape_tensor->data_c());
+    for (size_t i = 0; i < shape_size; i++) {
+      output_shape.push_back(data[i]);
     }
   } else if (inputs_.size() == kSingleNum) {
     auto new_height = GetNewHeight();
diff --git a/mindspore/lite/src/runtime/kernel/arm/base/resize_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/resize_base.cc
index e4f8972c62..a05e2530fd 100644
--- a/mindspore/lite/src/runtime/kernel/arm/base/resize_base.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/base/resize_base.cc
@@ -29,7 +29,7 @@ using mindspore::lite::RET_OK;
 
 namespace mindspore::kernel {
 namespace {
-constexpr int kInputNum = 1;
+constexpr int kMaxInputNum = 2;
 constexpr int kOutputNum = 1;
 constexpr int kRank = 4;
 }  // namespace
@@ -46,15 +46,35 @@ int ResizeBaseCPUKernel::CheckParameters() {
     MS_LOG(ERROR) << "Resize method should be bilinear or nearest_neighbor, but got " << method_;
     return RET_INVALID_OP_ATTR;
   }
-  new_height_ = parameter->new_height_;
-  if (new_height_ < 1) {
-    MS_LOG(ERROR) << "Resize new_height should >= 1, but got " << new_height_;
-    return RET_INVALID_OP_ATTR;
-  }
-  new_width_ = parameter->new_width_;
-  if (new_width_ < 1) {
-    MS_LOG(ERROR) << "Resize new_width should >= 1, but got " << new_width_;
-    return RET_INVALID_OP_ATTR;
+  if (this->in_tensors_.size() == lite::kSingleNum) {
+    new_height_ = parameter->new_height_;
+    if (new_height_ < 1) {
+      MS_LOG(ERROR) << "Resize new_height should >= 1, but got " << new_height_;
+      return RET_INVALID_OP_ATTR;
+    }
+    new_width_ = parameter->new_width_;
+    if (new_width_ < 1) {
+      MS_LOG(ERROR) << "Resize new_width should >= 1, but got " << new_width_;
+      return RET_INVALID_OP_ATTR;
+    }
+  } else if (this->in_tensors_.size() == lite::kDoubleNum) {
+    auto out_shape = this->in_tensors_[1]->MutableData();
+    if (out_shape == nullptr) {
+      MS_LOG(INFO) << "Out shape is not assigned";
+      const_shape_ = false;
+    } else {
+      new_height_ = reinterpret_cast<int32_t *>(out_shape)[0];
+      if (new_height_ < 1) {
+        MS_LOG(ERROR) << "Resize new_height should >= 1, but got " << new_height_;
+        return RET_INVALID_OP_ATTR;
+      }
+      new_width_ = reinterpret_cast<int32_t *>(out_shape)[1];
+      if (new_width_ < 1) {
+        MS_LOG(ERROR) << "Resize new_width should >= 1, but got " << new_width_;
+        return RET_INVALID_OP_ATTR;
+      }
+      const_shape_ = true;
+    }
   }
   align_corners_ = parameter->align_corners_;
   preserve_aspect_ratio = parameter->preserve_aspect_ratio_;
@@ -66,8 +86,15 @@ int ResizeBaseCPUKernel::CheckParameters() {
 }
 
 int ResizeBaseCPUKernel::CheckInputsOuputs() {
-  if (in_tensors_.size() != kInputNum) {
-    MS_LOG(ERROR) << "Resize input num should be " << kInputNum << ", but got " << in_tensors_.size();
+  if (in_tensors_.size() <= lite::kDoubleNum) {
+    for (size_t i = 0; i < in_tensors_.size(); i++) {
+      auto input = in_tensors_.at(i);
+      if (input == nullptr) {
+        return RET_NULL_PTR;
+      }
+    }
+  } else {
+    MS_LOG(ERROR) << "Resize input num should be no more than " << kMaxInputNum << ", but got " << in_tensors_.size();
     return RET_ERROR;
   }
   auto input = in_tensors_.at(0);
@@ -97,7 +124,7 @@ int ResizeBaseCPUKernel::Init() {
   auto input = in_tensors_.at(0);
   auto input_shape = input->shape();
-  if (input_shape.size() != kRank) {
+  if (!input_shape.empty() && input_shape.size() != kRank) {
     MS_LOG(ERROR) << "Resize op support input rank 4, got " << input_shape.size();
     return RET_ERROR;
   }
diff --git a/mindspore/lite/src/runtime/kernel/arm/base/resize_base.h b/mindspore/lite/src/runtime/kernel/arm/base/resize_base.h
index 3540855ce8..6ed3a47912 100644
--- a/mindspore/lite/src/runtime/kernel/arm/base/resize_base.h
+++ b/mindspore/lite/src/runtime/kernel/arm/base/resize_base.h
@@ -42,6 +42,7 @@ class ResizeBaseCPUKernel : public LiteKernel {
   int64_t new_width_;
   bool align_corners_;
   bool preserve_aspect_ratio;
+  bool const_shape_;
 
  private:
   int CheckParameters();
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/expandDims.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/expandDims.cc
index cedad34e89..43bd33fb6d 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/expandDims.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/expandDims.cc
@@ -48,10 +48,20 @@ int ExpandDimsCPUKernel::DoExpandDims(int task_id) {
     return RET_OK;
   }
   int offset = task_id * thread_sz_stride_;
-  int ret = ExpandDims(in_ptr_ + offset, out_ptr_ + offset, size * sizeof(float));
-  if (ret != RET_OK) {
-    MS_LOG(ERROR) << "ExpandDimsRun error task_id[" << task_id << "] error_code[" << ret << "]";
-    return ret;
+  if (this->in_tensors_[0]->data_type() == kNumberTypeFloat32) {
+    int ret = ExpandDims(reinterpret_cast<float *>(in_ptr_) + offset,
+                         reinterpret_cast<float *>(out_ptr_) + offset, size * sizeof(float));
+    if (ret != RET_OK) {
+      MS_LOG(ERROR) << "ExpandDimsRun error task_id[" << task_id << "] error_code[" << ret << "]";
+      return ret;
+    }
+  } else if (this->in_tensors_[0]->data_type() == kNumberTypeInt8) {
+    int ret = ExpandDims(reinterpret_cast<int8_t *>(in_ptr_) + offset,
+                         reinterpret_cast<int8_t *>(out_ptr_) + offset, size * sizeof(int8_t));
+    if (ret != RET_OK) {
+      MS_LOG(ERROR) << "ExpandDimsRun error task_id[" << task_id << "] error_code[" << ret << "]";
+      return ret;
+    }
   }
   return RET_OK;
 }
@@ -72,8 +82,8 @@ int ExpandDimsCPUKernel::Run() {
     MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret;
     return prepare_ret;
   }
-  in_ptr_ = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
-  out_ptr_ = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
+  in_ptr_ = in_tensors_.at(0)->MutableData();
+  out_ptr_ = out_tensors_.at(0)->MutableData();
   auto ret = ParallelLaunch(this->context_->thread_pool_, ExpandDimsRun, this, thread_sz_count_);
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "ExpandDimsRun error error_code[" << ret << "]";
@@ -105,4 +115,5 @@ kernel::LiteKernel *CpuExpandsDimsFp32KernelCreator(const std::vector<lite::Tens
 }
 
 REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_ExpandDims, CpuExpandsDimsFp32KernelCreator)
+REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_ExpandDims, CpuExpandsDimsFp32KernelCreator)
 }  // namespace mindspore::kernel
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/expandDims.h b/mindspore/lite/src/runtime/kernel/arm/fp32/expandDims.h
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/expandDims.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/expandDims.h
@@ ... @@ class ExpandDimsCPUKernel : public LiteKernel {
-  float *in_ptr_;
-  float *out_ptr_;
+  void *in_ptr_;
+  void *out_ptr_;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/pad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/pad.cc
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/pad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/pad.cc
@@ ... @@
     if (paddings[i * 2] > max_valid) {
-      MS_LOG(ERROR) << prefix << "paddings " << paddings[i * 2] << "should be less than " << max_valid + 1;
-      return RET_ERROR;
+      MS_LOG(WARNING) << prefix << "paddings " << paddings[i * 2] << "should be less than " << max_valid + 1;
     }
     if (paddings[i * 2 + 1] > max_valid) {
-      MS_LOG(ERROR) << prefix << "paddings " << paddings[i * 2 + 1] << "should be less than " << max_valid + 1;
-      return RET_ERROR;
+      MS_LOG(WARNING) << prefix << "paddings " << paddings[i * 2 + 1] << "should be less than " << max_valid + 1;
     }
   }
   return RET_OK;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/resize.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/resize.cc
index 7ee2bb6d61..5b2eaa1341 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/resize.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/resize.cc
@@ -179,6 +179,17 @@ int ResizeCPUKernel::RunImpl(int task_id) {
       break;
     }
     case static_cast<int>(schema::ResizeMethod_NEAREST_NEIGHBOR): {
+      if (in_tensors_.size() == lite::kDoubleNum && !const_shape_) {
+        auto out_shape = in_tensors_.at(1);
+        auto data = reinterpret_cast<int32_t *>(out_shape->MutableData());
+        if (data == nullptr) {
+          MS_LOG(ERROR) << "The out shape data is nullptr.";
+          return RET_NULL_PTR;
+        } else {
+          out_tensors_[0]->shape()[1] = static_cast<int>(data[0]);
+          out_tensors_[0]->shape()[2] = static_cast<int>(data[1]);
+        }
+      }
       ret = ResizeNearestNeighbor(input_data, output_data, input_shape.data(), out_tensors_[0]->shape().data(),
                                   align_corners_, task_id, context_->thread_num_);
       break;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/shape.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/shape.cc
index 5d5cd404b1..ffd09368c2 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/shape.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/shape.cc
@@ -81,4 +81,5 @@ kernel::LiteKernel *CpuShapeFp32KernelCreator(const std::vector<lite::Tensor *>
 }
 
 REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Shape, CpuShapeFp32KernelCreator)
+REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Shape, CpuShapeFp32KernelCreator)
 }  // namespace mindspore::kernel
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/squeeze_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/squeeze_int8.h
index 9d717e2e8c..fb18ab405d 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/squeeze_int8.h
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/squeeze_int8.h
@@ -31,7 +31,9 @@ class SqueezeInt8CPUKernel : public SqueezeBaseCPUKernel {
   SqueezeInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
                        const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx,
                        const mindspore::lite::PrimitiveC *primitive)
-      : SqueezeBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
+      : SqueezeBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {
+    para_ = reinterpret_cast<SqueezeParameter *>(parameter);
+  }
   ~SqueezeInt8CPUKernel() override { delete quant_Squeeze_parm_; }
 
   int Init() override;
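
Reviewer notes — the sketches below are illustrative only and are not part of the diff.

(1) The retyped nnacl ExpandDims is a plain byte copy, which is what lets the
single symbol serve both the fp32 registration and the new int8 one. A minimal
standalone sketch of that contract (here NNACL_OK is assumed to be 0):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Same contract as nnacl's ExpandDims after this patch: expand_dims only
    // inserts a dimension of size 1, so the payload is copied byte-for-byte
    // regardless of element type.
    int ExpandDims(void *input_ptr, void *output_ptr, size_t data_size) {
      memcpy(output_ptr, input_ptr, data_size);
      return 0;  // NNACL_OK (assumed value)
    }

    int main() {
      int8_t in[8] = {1, 2, 3, 4, 5, 6, 7, 8};  // hypothetical int8 tensor
      int8_t out[8] = {0};
      return ExpandDims(in, out, sizeof(in));
    }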
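(2) With two inputs, CheckParameters() now takes {new_height, new_width} from
the second tensor and defers to Run() when that tensor has no data yet
(const_shape_ = false). A sketch of that decision; ShapeTensor and ReadOutShape
are stand-ins for lite::Tensor and the kernel logic, not real API:

    #include <cstdint>
    #include <vector>

    struct ShapeTensor {  // stand-in for lite::Tensor
      std::vector<int32_t> data;  // {new_height, new_width} once assigned
      int32_t *MutableData() { return data.empty() ? nullptr : data.data(); }
    };

    // Returns false for an invalid size, mirroring the RET_INVALID_OP_ATTR
    // path; *const_shape is false when the size must be re-read at Run() time.
    bool ReadOutShape(ShapeTensor *t, int64_t *h, int64_t *w, bool *const_shape) {
      int32_t *out_shape = t->MutableData();
      if (out_shape == nullptr) {
        *const_shape = false;  // shape tensor not assigned yet
        return true;
      }
      *h = out_shape[0];
      *w = out_shape[1];
      *const_shape = true;
      return *h >= 1 && *w >= 1;
    }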
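(3) DoExpandDims() partitions the copy across threads in fixed strides: each
task handles the slice starting at task_id * stride, and tasks past the end do
nothing. A self-contained sketch of that partitioning (names are illustrative):

    #include <algorithm>
    #include <cstddef>

    // Copy the slice owned by one task; mirrors the offset arithmetic in
    // DoExpandDims() but is not the kernel's actual code.
    void CopySlice(const float *in, float *out, size_t total_elems,
                   size_t stride, int task_id) {
      size_t offset = static_cast<size_t>(task_id) * stride;
      if (offset >= total_elems) {
        return;  // this task has no remaining slice
      }
      size_t count = std::min(stride, total_elems - offset);
      std::copy(in + offset, in + offset + count, out + offset);
    }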