From 9ebd2dd044408658614053c39e2562b2774d9f29 Mon Sep 17 00:00:00 2001
From: zhaozhenlong
Date: Wed, 31 Mar 2021 17:37:01 +0800
Subject: [PATCH] add argmax npu for net enhance update

---
 .../lite/src/runtime/kernel/npu/argmax_npu.cc | 76 +++++++++++++++++++
 .../lite/src/runtime/kernel/npu/argmax_npu.h  | 48 ++++++++++++
 .../src/runtime/kernel/npu/reshape_npu.cc     | 11 ++-
 mindspore/lite/test/models_npu.cfg            |  1 +
 mindspore/lite/test/models_onnx.cfg           |  1 +
 .../optimizer/fusion/conv_transform_fusion.cc |  2 +-
 6 files changed, 136 insertions(+), 3 deletions(-)
 create mode 100644 mindspore/lite/src/runtime/kernel/npu/argmax_npu.cc
 create mode 100644 mindspore/lite/src/runtime/kernel/npu/argmax_npu.h

diff --git a/mindspore/lite/src/runtime/kernel/npu/argmax_npu.cc b/mindspore/lite/src/runtime/kernel/npu/argmax_npu.cc
new file mode 100644
index 00000000000..5a32a15835c
--- /dev/null
+++ b/mindspore/lite/src/runtime/kernel/npu/argmax_npu.cc
@@ -0,0 +1,76 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "src/runtime/kernel/npu/argmax_npu.h"
+#include <memory>
+#include "include/graph/op/all_ops.h"
+#include "src/kernel_registry.h"
+#include "src/runtime/agent/npu/npu_converter_utils.h"
+
+using mindspore::kernel::KERNEL_ARCH::kNPU;
+using mindspore::lite::KernelRegistrar;
+using mindspore::schema::PrimitiveType_ArgMaxFusion;
+
+namespace mindspore::kernel {
+int ArgmaxNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
+                               OpParameter *opParameter) {
+  return RET_OK;
+}
+
+int ArgmaxNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
+                                  const std::vector<ge::Operator *> &npu_inputs) {
+  op_ = new (std::nothrow) hiai::op::ArgMaxExt2(name_);
+  if (op_ == nullptr) {
+    MS_LOG(ERROR) << "New argmax npu operator for " << name_ << " failed.";
+    return RET_ERROR;
+  }
+  op_->set_input_x(*npu_inputs[0]);
+  axis_const_ = new (std::nothrow) hiai::op::Const(name_ + "_axis");
+  if (axis_const_ == nullptr) {
+    MS_LOG(ERROR) << "New weight const failed.";
+    return RET_ERROR;
+  }
+  ge::TensorDesc tensor_desc(ge::Shape({1}), ge::FORMAT_NCHW, ge::DT_INT32);
+  std::shared_ptr<ge::Tensor> ge_tensor =
+    std::make_shared<ge::Tensor>(tensor_desc, reinterpret_cast<const uint8_t *>(&(param_->axis_)), sizeof(int));
+  if (ge_tensor == nullptr) {
+    MS_LOG(ERROR) << "new ge_tensor failed.";
+    return RET_ERROR;
+  }
+  axis_const_->set_attr_value(ge_tensor);
+  op_->set_input_axis(*axis_const_);
+  op_->set_attr_keep_dims(param_->keep_dims_);
+  op_->set_attr_outmaxval(param_->out_value_);
+  op_->set_attr_topk(param_->topk_);
+
+  return RET_OK;
+}
+
+ge::Operator *mindspore::kernel::ArgmaxNPUKernel::GetNPUOp() { return op_; }
+
+ArgmaxNPUKernel::~ArgmaxNPUKernel() {
+  if (op_ != nullptr) {
+    delete op_;
+    op_ = nullptr;
+  }
+  if (axis_const_ != nullptr) {
+    delete axis_const_;
+    axis_const_ = nullptr;
+  }
+}
+
+REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_ArgMaxFusion, NPUKernelCreator<ArgmaxNPUKernel>)
+}  // namespace mindspore::kernel
diff --git a/mindspore/lite/src/runtime/kernel/npu/argmax_npu.h b/mindspore/lite/src/runtime/kernel/npu/argmax_npu.h
new file mode 100644
index 00000000000..1c5af525a4b
--- /dev/null
+++ b/mindspore/lite/src/runtime/kernel/npu/argmax_npu.h
@@ -0,0 +1,48 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_ARGMAX_NPU_H_
+#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_ARGMAX_NPU_H_
+
+#include <vector>
+#include "include/graph/op/all_ops.h"
+#include "include/graph/compatible/all_ops.h"
+#include "src/runtime/kernel/npu/npu_kernel.h"
+#include "nnacl/arg_min_max_parameter.h"
+
+namespace mindspore::kernel {
+class ArgmaxNPUKernel : public NPUKernel {
+ public:
+  ArgmaxNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
+                  const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
+      : NPUKernel(parameter, inputs, outputs, ctx) {
+    param_ = reinterpret_cast<ArgMinMaxParameter *>(parameter);
+  }
+  ~ArgmaxNPUKernel() override;
+
+  int IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
+                OpParameter *opParameter) override;
+  int SetNPUInputs(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
+                   const std::vector<ge::Operator *> &npu_inputs) override;
+  ge::Operator *GetNPUOp() override;
+
+ private:
+  hiai::op::ArgMaxExt2 *op_ = nullptr;
+  hiai::op::Const *axis_const_ = nullptr;
+  ArgMinMaxParameter *param_;
+};
+}  // namespace mindspore::kernel
+#endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_ARGMAX_NPU_H_
diff --git a/mindspore/lite/src/runtime/kernel/npu/reshape_npu.cc b/mindspore/lite/src/runtime/kernel/npu/reshape_npu.cc
index 0e31280e77a..8a269c77fa7 100644
--- a/mindspore/lite/src/runtime/kernel/npu/reshape_npu.cc
+++ b/mindspore/lite/src/runtime/kernel/npu/reshape_npu.cc
@@ -26,10 +26,17 @@ using mindspore::schema::PrimitiveType_Reshape;
 
 namespace mindspore::kernel {
 int ReshapeNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                                 OpParameter *opParameter) {
-  if (reshape_param_->shape_dim_ == 0) {
-    MS_LOG(ERROR) << "Npu reshape op only supports const shape.";
+  if (inputs.size() == 1 && reshape_param_->shape_dim_ == 0) {
+    MS_LOG(WARNING) << "Npu reshape op only supports const shape.";
     return RET_ERROR;
   }
+  if (inputs.size() > 1) {
+    auto shape_tensor = inputs.at(1);
+    if (!shape_tensor->IsConst()) {
+      MS_LOG(WARNING) << "Npu reshape op only supports const shape.";
+      return RET_ERROR;
+    }
+  }
   return RET_OK;
 }
diff --git a/mindspore/lite/test/models_npu.cfg b/mindspore/lite/test/models_npu.cfg
index 150b60a3623..877e66d75a9 100644
--- a/mindspore/lite/test/models_npu.cfg
+++ b/mindspore/lite/test/models_npu.cfg
@@ -81,3 +81,4 @@ posenet_mobilenet_float_075_1_default_1.tflite 395
 nasnet_mobile.tflite 1
 ml_video_edit_art_generate.onnx 0.5
 ml_video_edit_art_transfer.onnx 3 3
+ml_video_edit_enhance_update.onnx 0.5
diff --git a/mindspore/lite/test/models_onnx.cfg b/mindspore/lite/test/models_onnx.cfg
index 4998f57b1c8..1674744a22e 100644
--- a/mindspore/lite/test/models_onnx.cfg
+++ b/mindspore/lite/test/models_onnx.cfg
@@ -72,3 +72,4 @@ mtk_face_features_v2.onnx;1,256,192,3
 mtk_face_recognition_v3.onnx
 mtk_face_recognition_v2.onnx
 ml_2012_ocr_detection.onnx
+ml_video_edit_enhance_update.onnx
diff --git a/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.cc b/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.cc
index 846a971bd55..6483b36ad0a 100644
--- a/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.cc
+++ b/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.cc
@@ -246,7 +246,7 @@ void ConvTransformFusion::CalNewWeightTensor(const CNodePtr &conv_node, const te
                                              int kernel_num, const float *trans_scale) const {
   MS_ASSERT(weight_data != nullptr);
   MS_ASSERT(trans_scale != nullptr);
-  if (weight_tensor->shape().size() != 4) {
+  if (weight_tensor->shape().size() > 4) {
     MS_LOG(ERROR) << "weight tensor shape error";
     return;
   }