!9878 add npu arithmetic ops

From: @yeyunpeng2020
Reviewed-by: @zhang_xue_tong,@hangangqiang
Signed-off-by: @zhang_xue_tong
This commit is contained in:
mindspore-ci-bot 2020-12-14 10:59:17 +08:00 committed by Gitee
commit 48f03d2272
22 changed files with 615 additions and 283 deletions

View File

@ -1,57 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/npu/add_npu.h"
#include "include/graph/op/all_ops.h"
#include "src/kernel_registry.h"
using mindspore::kernel::KERNEL_ARCH::kNPU;
using mindspore::lite::KernelRegistrar;
using mindspore::schema::PrimitiveType_Add;
namespace mindspore::kernel {
int AddNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
OpParameter *opParameter) {
if (inputs[0]->shape().size() != inputs[1]->shape().size()) {
MS_LOG(ERROR) << "For the two inputs, the corresponding dimensions must have the same value, or one of them is 1."
<< " shape 1 is:" << inputs[0]->shape() << " shape 2 is:" << inputs[1]->shape();
return RET_ERROR;
}
return RET_OK;
}
int AddNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
const std::vector<ge::Operator *> &npu_inputs) {
op_ = new (std::nothrow) hiai::op::Add(name_);
if (op_ == nullptr) {
return RET_ERROR;
}
op_->set_input_x1(*npu_inputs[0]);
op_->set_input_x2(*npu_inputs[1]);
return RET_OK;
}
ge::Operator *mindspore::kernel::AddNPUKernel::GetNPUOp() { return this->op_; }
AddNPUKernel::~AddNPUKernel() {
if (op_ != nullptr) {
delete op_;
op_ = nullptr;
}
}
REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Add, NPUKernelCreator<AddNPUKernel>)
} // namespace mindspore::kernel

View File

@ -0,0 +1,168 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/npu/arithmetic_npu.h"
#include <string>
#include "include/graph/op/all_ops.h"
#include "src/kernel_registry.h"
using mindspore::kernel::KERNEL_ARCH::kNPU;
using mindspore::lite::KernelRegistrar;
using mindspore::schema::PrimitiveType_Add;
using mindspore::schema::PrimitiveType_Div;
using mindspore::schema::PrimitiveType_Equal;
using mindspore::schema::PrimitiveType_FloorDiv;
using mindspore::schema::PrimitiveType_FloorMod;
using mindspore::schema::PrimitiveType_Greater;
using mindspore::schema::PrimitiveType_GreaterEqual;
using mindspore::schema::PrimitiveType_Less;
using mindspore::schema::PrimitiveType_LessEqual;
using mindspore::schema::PrimitiveType_LogicalAnd;
using mindspore::schema::PrimitiveType_LogicalOr;
using mindspore::schema::PrimitiveType_Maximum;
using mindspore::schema::PrimitiveType_Minimum;
using mindspore::schema::PrimitiveType_Mul;
using mindspore::schema::PrimitiveType_NotEqual;
using mindspore::schema::PrimitiveType_SquaredDifference;
using mindspore::schema::PrimitiveType_Sub;
namespace mindspore::kernel {
// Validates NPU-side shape restrictions for binary arithmetic ops.
// Mul/Div: HiAI requires both inputs to have exactly the same shape.
// Add/Sub: both inputs must have the same rank (HiAI broadcasts dims of 1).
int ArithmeticNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs,
                                   const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter) {
  auto type = primitive_->Type();  // hoisted: queried several times below
  if (type == PrimitiveType_Mul || type == PrimitiveType_Div) {
    if (inputs[0]->shape() != inputs[1]->shape()) {
      MS_LOG(WARNING) << "For the two inputs, the corresponding dimensions must have the same value."
                      << " shape 1 is:" << inputs[0]->shape() << " shape 2 is:" << inputs[1]->shape();
      return RET_ERROR;
    }
  }
  if (type == PrimitiveType_Add || type == PrimitiveType_Sub) {
    if (inputs[0]->shape().size() != inputs[1]->shape().size()) {
      MS_LOG(WARNING)
        << "For the two inputs, the corresponding dimensions must have the same value, or one of them is 1."
        << " shape 1 is:" << inputs[0]->shape() << " shape 2 is:" << inputs[1]->shape();
      return RET_ERROR;
    }
  }
  return RET_OK;
}
// Allocates a HiAI binary operator of type T and connects its two inputs.
// Returns nullptr on allocation failure.
template <typename T>
ge::Operator *CreateOperator(const std::vector<ge::Operator *> &npu_inputs, const std::string &name) {
  auto op = new (std::nothrow) T(name);
  if (op == nullptr) {
    MS_LOG(ERROR) << name << " op is nullptr";
    return nullptr;
  }
  op->set_input_x1(*npu_inputs[0]);
  op->set_input_x2(*npu_inputs[1]);
  return op;
}
// Maps the lite primitive type to the corresponding HiAI operator and stores
// it in op_. Every primitive registered via REG_KERNEL below must have a
// matching case here, otherwise the kernel is selected but always fails.
int ArithmeticNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs,
                                      const std::vector<lite::Tensor *> &outputs,
                                      const std::vector<ge::Operator *> &npu_inputs) {
  ge::Operator *op = nullptr;
  switch (primitive_->Type()) {
    case PrimitiveType_Mul:
      op = CreateOperator<hiai::op::Mul>(npu_inputs, name_);
      break;
    case PrimitiveType_Add:
      op = CreateOperator<hiai::op::Add>(npu_inputs, name_);
      break;
    case PrimitiveType_Sub:
      op = CreateOperator<hiai::op::Sub>(npu_inputs, name_);
      break;
    case PrimitiveType_Div:
      op = CreateOperator<hiai::op::RealDiv>(npu_inputs, name_);
      break;
    case PrimitiveType_FloorMod:
      op = CreateOperator<hiai::op::FloorMod>(npu_inputs, name_);
      break;
    case PrimitiveType_FloorDiv:
      op = CreateOperator<hiai::op::FloorDiv>(npu_inputs, name_);
      break;
    case PrimitiveType_LogicalAnd:
      op = CreateOperator<hiai::op::LogicalAnd>(npu_inputs, name_);
      break;
    case PrimitiveType_LogicalOr:
      op = CreateOperator<hiai::op::LogicalOr>(npu_inputs, name_);
      break;
    case PrimitiveType_Maximum:
      op = CreateOperator<hiai::op::Maximum>(npu_inputs, name_);
      break;
    case PrimitiveType_Minimum:
      // Bug fix: Minimum is registered below but previously had no case here,
      // so Minimum models always hit the default branch and failed.
      op = CreateOperator<hiai::op::Minimum>(npu_inputs, name_);
      break;
    case PrimitiveType_SquaredDifference:
      op = CreateOperator<hiai::op::SquaredDifference>(npu_inputs, name_);
      break;
    case PrimitiveType_NotEqual:
      op = CreateOperator<hiai::op::NotEqual>(npu_inputs, name_);
      break;
    case PrimitiveType_Equal:
      op = CreateOperator<hiai::op::Equal>(npu_inputs, name_);
      break;
    case PrimitiveType_Less:
      op = CreateOperator<hiai::op::Less>(npu_inputs, name_);
      break;
    case PrimitiveType_LessEqual:
      op = CreateOperator<hiai::op::LessEqual>(npu_inputs, name_);
      break;
    case PrimitiveType_Greater:
      op = CreateOperator<hiai::op::Greater>(npu_inputs, name_);
      break;
    case PrimitiveType_GreaterEqual:
      op = CreateOperator<hiai::op::GreaterEqual>(npu_inputs, name_);
      break;
    default:
      MS_LOG(ERROR) << "Unsupported primitive type:"
                    << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(primitive_->Type()));
      return RET_ERROR;
  }
  if (op == nullptr) {
    MS_LOG(ERROR) << "Arithmetic create operator return nullptr.";
    return RET_ERROR;
  }
  op_ = op;
  return RET_OK;
}
ge::Operator *mindspore::kernel::ArithmeticNPUKernel::GetNPUOp() { return this->op_; }
// Releases the owned HiAI operator.
ArithmeticNPUKernel::~ArithmeticNPUKernel() {
  if (op_ != nullptr) {
    delete op_;
    op_ = nullptr;
  }
}
REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Mul, NPUKernelCreator<ArithmeticNPUKernel>)
REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Add, NPUKernelCreator<ArithmeticNPUKernel>)
REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Sub, NPUKernelCreator<ArithmeticNPUKernel>)
REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Div, NPUKernelCreator<ArithmeticNPUKernel>)
REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_FloorMod, NPUKernelCreator<ArithmeticNPUKernel>)
REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_FloorDiv, NPUKernelCreator<ArithmeticNPUKernel>)
REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_LogicalAnd, NPUKernelCreator<ArithmeticNPUKernel>)
REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_LogicalOr, NPUKernelCreator<ArithmeticNPUKernel>)
REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Maximum, NPUKernelCreator<ArithmeticNPUKernel>)
REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Minimum, NPUKernelCreator<ArithmeticNPUKernel>)
REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_SquaredDifference, NPUKernelCreator<ArithmeticNPUKernel>)
REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_NotEqual, NPUKernelCreator<ArithmeticNPUKernel>)
REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Equal, NPUKernelCreator<ArithmeticNPUKernel>)
REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Less, NPUKernelCreator<ArithmeticNPUKernel>)
REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_LessEqual, NPUKernelCreator<ArithmeticNPUKernel>)
REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Greater, NPUKernelCreator<ArithmeticNPUKernel>)
REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_GreaterEqual, NPUKernelCreator<ArithmeticNPUKernel>)
}  // namespace mindspore::kernel

View File

@ -14,19 +14,19 @@
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_ADD_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_ADD_NPU_H_
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_ARITHMETIC_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_ARITHMETIC_NPU_H_
#include <vector>
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/math_defs.h"
namespace mindspore::kernel {
class AddNPUKernel : public NPUKernel {
class ArithmeticNPUKernel : public NPUKernel {
public:
AddNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive)
ArithmeticNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive)
: NPUKernel(parameter, inputs, outputs, ctx, primitive) {}
~AddNPUKernel() override;
~ArithmeticNPUKernel() override;
int IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
OpParameter *opParameter) override;
@ -36,7 +36,7 @@ class AddNPUKernel : public NPUKernel {
ge::Operator *GetNPUOp() override;
private:
hiai::op::Add *op_ = nullptr;
ge::Operator *op_ = nullptr;
};
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_ADD_NPU_H_
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_ARITHMETIC_NPU_H_

View File

@ -0,0 +1,126 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/npu/arithmetic_self_npu.h"
#include <string>
#include "include/graph/op/all_ops.h"
#include "src/kernel_registry.h"
using mindspore::kernel::KERNEL_ARCH::kNPU;
using mindspore::lite::KernelRegistrar;
using mindspore::schema::PrimitiveType_Ceil;
using mindspore::schema::PrimitiveType_Cos;
using mindspore::schema::PrimitiveType_Floor;
using mindspore::schema::PrimitiveType_Log;
using mindspore::schema::PrimitiveType_LogicalNot;
using mindspore::schema::PrimitiveType_Neg;
using mindspore::schema::PrimitiveType_Reciprocal;
using mindspore::schema::PrimitiveType_Round;
using mindspore::schema::PrimitiveType_Rsqrt;
using mindspore::schema::PrimitiveType_Sin;
using mindspore::schema::PrimitiveType_Sqrt;
using mindspore::schema::PrimitiveType_Square;
namespace mindspore::kernel {
int ArithmeticSelfNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter) {
return RET_OK;
}
template <typename T>
ge::Operator *CreateOperator(ge::Operator *input, const std::string &name) {
auto op = new (std::nothrow) T(name);
if (op == nullptr) {
MS_LOG(ERROR) << name << " op is nullptr";
return nullptr;
}
op->set_input_x(*input);
return op;
}
int ArithmeticSelfNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs,
const std::vector<ge::Operator *> &npu_inputs) {
ge::Operator *op;
switch (primitive_->Type()) {
case PrimitiveType_Cos:
op = CreateOperator<hiai::op::Cos>(npu_inputs[0], name_);
break;
case PrimitiveType_Log:
op = CreateOperator<hiai::op::Log>(npu_inputs[0], name_);
break;
case PrimitiveType_Square:
op = CreateOperator<hiai::op::Square>(npu_inputs[0], name_);
break;
case PrimitiveType_Sqrt:
op = CreateOperator<hiai::op::Sqrt>(npu_inputs[0], name_);
break;
case PrimitiveType_Sin:
op = CreateOperator<hiai::op::Sin>(npu_inputs[0], name_);
break;
case PrimitiveType_LogicalNot:
op = CreateOperator<hiai::op::LogicalNot>(npu_inputs[0], name_);
break;
case PrimitiveType_Floor:
op = CreateOperator<hiai::op::Floor>(npu_inputs[0], name_);
break;
case PrimitiveType_Ceil:
op = CreateOperator<hiai::op::Ceil>(npu_inputs[0], name_);
break;
case PrimitiveType_Round:
op = CreateOperator<hiai::op::Round>(npu_inputs[0], name_);
break;
case PrimitiveType_Neg:
op = CreateOperator<hiai::op::Neg>(npu_inputs[0], name_);
break;
case PrimitiveType_Reciprocal:
op = CreateOperator<hiai::op::Reciprocal>(npu_inputs[0], name_);
break;
default:
MS_LOG(ERROR) << "Unsupported primitive type:"
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(primitive_->Type()));
return RET_ERROR;
}
if (op == nullptr) {
MS_LOG(ERROR) << "Arithmetic self create operator return nullptr.";
return RET_ERROR;
}
op_ = op;
return RET_OK;
}
ge::Operator *mindspore::kernel::ArithmeticSelfNPUKernel::GetNPUOp() { return this->op_; }
ArithmeticSelfNPUKernel::~ArithmeticSelfNPUKernel() {
if (op_ != nullptr) {
delete op_;
op_ = nullptr;
}
}
REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Cos, NPUKernelCreator<ArithmeticSelfNPUKernel>)
REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Log, NPUKernelCreator<ArithmeticSelfNPUKernel>)
REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Square, NPUKernelCreator<ArithmeticSelfNPUKernel>)
REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Sqrt, NPUKernelCreator<ArithmeticSelfNPUKernel>)
REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Rsqrt, NPUKernelCreator<ArithmeticSelfNPUKernel>)
REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Sin, NPUKernelCreator<ArithmeticSelfNPUKernel>)
REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_LogicalNot, NPUKernelCreator<ArithmeticSelfNPUKernel>)
REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Floor, NPUKernelCreator<ArithmeticSelfNPUKernel>)
REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Ceil, NPUKernelCreator<ArithmeticSelfNPUKernel>)
REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Round, NPUKernelCreator<ArithmeticSelfNPUKernel>)
REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Neg, NPUKernelCreator<ArithmeticSelfNPUKernel>)
REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Reciprocal, NPUKernelCreator<ArithmeticSelfNPUKernel>)
} // namespace mindspore::kernel

View File

@ -14,28 +14,29 @@
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_MUL_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_MUL_NPU_H_
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_ARITHMETICSELF_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_ARITHMETICSELF_NPU_H_
#include <vector>
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/math_defs.h"
namespace mindspore::kernel {
class MulNPUKernel : public NPUKernel {
class ArithmeticSelfNPUKernel : public NPUKernel {
public:
MulNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive)
ArithmeticSelfNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive)
: NPUKernel(parameter, inputs, outputs, ctx, primitive) {}
~MulNPUKernel() override;
~ArithmeticSelfNPUKernel() override;
int IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
OpParameter *opParameter) override;
int SetNPUInputs(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
const std::vector<ge::Operator *> &npu_inputs) override;
ge::Operator *GetNPUOp() override;
private:
hiai::op::Mul *op_ = nullptr;
ge::Operator *op_ = nullptr;
};
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_Mul_NPU_H_
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_ARITHMETICSELF_NPU_H_

View File

@ -0,0 +1,53 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/npu/cast_npu.h"
#include "src/kernel_registry.h"
#include "src/runtime/agent/npu/npu_converter_utils.h"
using mindspore::kernel::KERNEL_ARCH::kNPU;
using mindspore::lite::KernelRegistrar;
using mindspore::schema::PrimitiveType_Cast;
namespace mindspore::kernel {
// Cast has no extra NPU-side restrictions: any tensor pair is accepted here;
// dtype conversion support is the responsibility of the HiAI CastT operator.
int CastNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                             OpParameter *opParameter) {
  return RET_OK;
}
// Builds the HiAI CastT operator, connects its input and configures the
// source/destination dtypes translated to their NPU equivalents.
int CastNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                                const std::vector<ge::Operator *> &npu_inputs) {
  auto cast_op = new (std::nothrow) hiai::op::CastT(name_);
  if (cast_op == nullptr) {
    MS_LOG(ERROR) << name_ << " op is nullptr";
    return RET_ERROR;
  }
  cast_op->set_input_x(*npu_inputs[0]);
  cast_op->set_attr_src_dtype(lite::ConverterToNPUDataType(src_type_));
  cast_op->set_attr_dst_dtype(lite::ConverterToNPUDataType(dst_type_));
  op_ = cast_op;
  return RET_OK;
}
ge::Operator *mindspore::kernel::CastNPUKernel::GetNPUOp() { return this->op_; }
// Releases the owned HiAI operator (delete on nullptr is a no-op).
CastNPUKernel::~CastNPUKernel() {
  delete op_;
  op_ = nullptr;
}
REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Cast, NPUKernelCreator<CastNPUKernel>)
}  // namespace mindspore::kernel

View File

@ -0,0 +1,48 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_CAST_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_CAST_NPU_H_
#include <vector>
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/all_ops.h"
#include "nnacl/fp32/cast_fp32.h"
namespace mindspore::kernel {
// NPU kernel wrapping HiAI CastT: converts a tensor from src_type_ to
// dst_type_. The dtypes are taken from the CastParameter at construction.
class CastNPUKernel : public NPUKernel {
 public:
  CastNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
                const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
                const mindspore::lite::PrimitiveC *primitive)
      : NPUKernel(parameter, inputs, outputs, ctx, primitive) {
    // Cache the source/destination dtypes from the parsed cast parameter.
    auto cast_parameter = reinterpret_cast<CastParameter *>(parameter);
    dst_type_ = static_cast<TypeId>(cast_parameter->dst_type_);
    src_type_ = static_cast<TypeId>(cast_parameter->src_type_);
  }
  ~CastNPUKernel() override;
  int IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                OpParameter *opParameter) override;
  int SetNPUInputs(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                   const std::vector<ge::Operator *> &npu_inputs) override;
  ge::Operator *GetNPUOp() override;

 private:
  // Owned HiAI operator; created in SetNPUInputs, freed in the destructor.
  hiai::op::CastT *op_ = nullptr;
  TypeId dst_type_;  // target dtype of the cast
  TypeId src_type_;  // source dtype of the cast
};
}  // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_CAST_NPU_H_

View File

@ -31,6 +31,7 @@ int ConcatNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs, con
const std::vector<ge::Operator *> &npu_inputs) {
op_ = new (std::nothrow) hiai::op::ConcatD(name_);
if (op_ == nullptr) {
MS_LOG(ERROR) << name_ << " op is nullptr";
return RET_ERROR;
}
op_->set_attr_concat_dim(axis_);

View File

@ -1,57 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/npu/div_npu.h"
#include "include/graph/op/all_ops.h"
#include "src/kernel_registry.h"
using mindspore::kernel::KERNEL_ARCH::kNPU;
using mindspore::lite::KernelRegistrar;
using mindspore::schema::PrimitiveType_Div;
namespace mindspore::kernel {
int DivNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
OpParameter *opParameter) {
if (inputs[0]->shape() != inputs[1]->shape()) {
MS_LOG(ERROR) << "For the two inputs, the corresponding dimensions must have the same value, or one of them is 1."
<< " shape 1 is:" << inputs[0]->shape() << " shape 2 is:" << inputs[1]->shape();
return RET_ERROR;
}
return RET_OK;
}
int DivNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
const std::vector<ge::Operator *> &npu_inputs) {
op_ = new (std::nothrow) hiai::op::RealDiv(name_);
if (op_ == nullptr) {
return RET_ERROR;
}
op_->set_input_x1(*npu_inputs[0]);
op_->set_input_x2(*npu_inputs[1]);
return RET_OK;
}
ge::Operator *mindspore::kernel::DivNPUKernel::GetNPUOp() { return this->op_; }
DivNPUKernel::~DivNPUKernel() {
if (op_ != nullptr) {
delete op_;
op_ = nullptr;
}
}
REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Div, NPUKernelCreator<DivNPUKernel>)
} // namespace mindspore::kernel

View File

@ -32,8 +32,10 @@ int EltwiseNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, const
int EltwiseNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs,
const std::vector<ge::Operator *> &npu_inputs) {
MS_LOG(ERROR) << name_;
op_ = new (std::nothrow) hiai::op::Eltwise(name_);
if (op_ == nullptr) {
MS_LOG(ERROR) << name_ << " op is nullptr";
return RET_ERROR;
}
op_->set_attr_mode(lite::ConverterToNPUEltwiseMode(mode_));

View File

@ -14,44 +14,44 @@
* limitations under the License.
*/
#include "src/runtime/kernel/npu/mul_npu.h"
#include "include/graph/op/all_ops.h"
#include "src/runtime/kernel/npu/gather_npu.h"
#include "src/kernel_registry.h"
#include "src/runtime/agent/npu/npu_converter_utils.h"
using mindspore::kernel::KERNEL_ARCH::kNPU;
using mindspore::lite::KernelRegistrar;
using mindspore::schema::PrimitiveType_Mul;
using mindspore::schema::PrimitiveType_Gather;
namespace mindspore::kernel {
int MulNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
OpParameter *opParameter) {
if (inputs[0]->shape() != inputs[1]->shape()) {
MS_LOG(ERROR) << "For the two inputs, the corresponding dimensions must have the same value."
<< " shape 1 is:" << inputs[0]->shape() << " shape 2 is:" << inputs[1]->shape();
int GatherNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
OpParameter *opParameter) {
if (inputs[1]->data_type() != kNumberTypeInt32) {
MS_LOG(WARNING) << "Gather indices only support Int32";
return RET_ERROR;
}
return RET_OK;
}
int MulNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
const std::vector<ge::Operator *> &npu_inputs) {
op_ = new (std::nothrow) hiai::op::Mul(name_);
int GatherNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
const std::vector<ge::Operator *> &npu_inputs) {
op_ = new (std::nothrow) hiai::op::GatherV2D(name_);
if (op_ == nullptr) {
MS_LOG(ERROR) << name_ << " op is nullptr";
return RET_ERROR;
}
op_->set_input_x1(*npu_inputs[0]);
op_->set_input_x2(*npu_inputs[1]);
op_->set_input_x(*npu_inputs[0]);
op_->set_input_indices(*npu_inputs[1]);
op_->set_attr_axis(axis_);
return RET_OK;
}
ge::Operator *mindspore::kernel::MulNPUKernel::GetNPUOp() { return this->op_; }
ge::Operator *mindspore::kernel::GatherNPUKernel::GetNPUOp() { return this->op_; }
MulNPUKernel::~MulNPUKernel() {
GatherNPUKernel::~GatherNPUKernel() {
if (op_ != nullptr) {
delete op_;
op_ = nullptr;
}
}
REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Mul, NPUKernelCreator<MulNPUKernel>)
REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Gather, NPUKernelCreator<GatherNPUKernel>)
} // namespace mindspore::kernel

View File

@ -14,19 +14,23 @@
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_DIV_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_DIV_NPU_H_
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_GATHER_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_GATHER_NPU_H_
#include <vector>
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/math_defs.h"
#include "include/graph/op/all_ops.h"
#include "nnacl/gather_parameter.h"
namespace mindspore::kernel {
class DivNPUKernel : public NPUKernel {
class GatherNPUKernel : public NPUKernel {
public:
DivNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive)
: NPUKernel(parameter, inputs, outputs, ctx, primitive) {}
~DivNPUKernel() override;
GatherNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive)
: NPUKernel(parameter, inputs, outputs, ctx, primitive) {
auto gather_parameter = reinterpret_cast<GatherParameter *>(parameter);
axis_ = gather_parameter->axis_;
}
~GatherNPUKernel() override;
int IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
OpParameter *opParameter) override;
@ -35,7 +39,8 @@ class DivNPUKernel : public NPUKernel {
ge::Operator *GetNPUOp() override;
private:
hiai::op::RealDiv *op_ = nullptr;
hiai::op::GatherV2D *op_ = nullptr;
int axis_;
};
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_DIV_NPU_H_
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_GATHER_NPU_H_

View File

@ -33,6 +33,7 @@ int ReshapeNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs,
const std::vector<ge::Operator *> &npu_inputs) {
op_ = new (std::nothrow) hiai::op::Reshape(name_);
if (op_ == nullptr) {
MS_LOG(ERROR) << name_ << " op is nullptr";
return RET_ERROR;
}
op_->set_input_x(*npu_inputs[0]);

View File

@ -28,10 +28,10 @@ namespace mindspore::kernel {
int ResizeNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
OpParameter *opParameter) {
if (method_ != schema::ResizeMethod_LINEAR || method_ == schema::ResizeMethod_NEAREST) {
MS_LOG(ERROR) << "Unsupported resize method type:" << method_;
MS_LOG(WARNING) << "Unsupported resize method type:" << method_;
return RET_ERROR;
}
return RET_OK;
return RET_ERROR;
}
int ResizeNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
@ -42,20 +42,21 @@ int ResizeNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs, con
sizeTensor->SetData(reinterpret_cast<uint8_t *>(dataValue.data()), 2 * sizeof(int32_t));
auto out_size = new (std::nothrow) hiai::op::Const(name_ + "size");
out_size->set_attr_value(sizeTensor);
if (method_ == schema::ResizeMethod_LINEAR) {
auto op = new (std::nothrow) hiai::op::ResizeBilinearV2(name_);
if (op_ == nullptr) {
if (op == nullptr) {
MS_LOG(ERROR) << " op is nullptr.";
return RET_ERROR;
}
op->set_attr_align_corners(false);
op->set_attr_align_corners(align_corners_);
op->set_input_x(*npu_inputs[0]);
op->set_input_size(*out_size);
op->set_attr_half_pixel_centers(true);
op->set_attr_half_pixel_centers(preserve_aspect_ratio_);
op_ = op;
} else {
auto op = new (std::nothrow) hiai::op::ResizeNearestNeighborV2(name_);
if (op_ == nullptr) {
if (op == nullptr) {
MS_LOG(ERROR) << " op is nullptr.";
return RET_ERROR;
}
op->set_attr_align_corners(align_corners_);

View File

@ -31,6 +31,7 @@ int ScaleNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs, cons
const std::vector<ge::Operator *> &npu_inputs) {
op_ = new (std::nothrow) hiai::op::Scale(name_);
if (op_ == nullptr) {
MS_LOG(ERROR) << name_ << " op is nullptr";
return RET_ERROR;
}
op_->set_attr_axis(this->axis_);

View File

@ -14,37 +14,38 @@
* limitations under the License.
*/
#include "src/runtime/kernel/npu/floor_npu.h"
#include "src/runtime/kernel/npu/shape_npu.h"
#include "src/kernel_registry.h"
#include "src/runtime/agent/npu/npu_converter_utils.h"
using mindspore::kernel::KERNEL_ARCH::kNPU;
using mindspore::lite::KernelRegistrar;
using mindspore::schema::PrimitiveType_Floor;
using mindspore::schema::PrimitiveType_Shape;
namespace mindspore::kernel {
int FloorNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
int ShapeNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
OpParameter *opParameter) {
return RET_OK;
}
int FloorNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
int ShapeNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
const std::vector<ge::Operator *> &npu_inputs) {
op_ = new (std::nothrow) hiai::op::Floor(name_);
op_ = new (std::nothrow) hiai::op::Shape(name_);
if (op_ == nullptr) {
MS_LOG(ERROR) << name_ << " op is nullptr";
return RET_ERROR;
}
op_->set_input_x(*npu_inputs[0]);
return RET_OK;
}
ge::Operator *mindspore::kernel::FloorNPUKernel::GetNPUOp() { return this->op_; }
ge::Operator *mindspore::kernel::ShapeNPUKernel::GetNPUOp() { return this->op_; }
FloorNPUKernel::~FloorNPUKernel() {
ShapeNPUKernel::~ShapeNPUKernel() {
if (op_ != nullptr) {
delete op_;
op_ = nullptr;
}
}
REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Floor, NPUKernelCreator<FloorNPUKernel>)
REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Shape, NPUKernelCreator<ShapeNPUKernel>)
} // namespace mindspore::kernel

View File

@ -14,19 +14,19 @@
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_FLOOR_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_FLOOR_NPU_H_
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_SHAPE_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_SHAPE_NPU_H_
#include <vector>
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/math_defs.h"
#include "include/graph/op/all_ops.h"
namespace mindspore::kernel {
class FloorNPUKernel : public NPUKernel {
class ShapeNPUKernel : public NPUKernel {
public:
FloorNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
ShapeNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive)
: NPUKernel(parameter, inputs, outputs, ctx, primitive) {}
~FloorNPUKernel() override;
~ShapeNPUKernel() override;
int IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
OpParameter *opParameter) override;
@ -35,7 +35,7 @@ class FloorNPUKernel : public NPUKernel {
ge::Operator *GetNPUOp() override;
private:
hiai::op::Floor *op_ = nullptr;
hiai::op::Shape *op_ = nullptr;
};
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_FLOOR_NPU_H_
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_SHAPE_NPU_H_

View File

@ -32,6 +32,7 @@ int SoftmaxNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs,
const std::vector<ge::Operator *> &npu_inputs) {
op_ = new (std::nothrow) hiai::op::Softmax(name_);
if (op_ == nullptr) {
MS_LOG(ERROR) << name_ << " op is nullptr";
return RET_ERROR;
}
if (axis_ == -1) {

View File

@ -0,0 +1,80 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/npu/strided_slice_npu.h"
#include "src/kernel_registry.h"
#include "src/runtime/agent/npu/npu_converter_utils.h"
using mindspore::kernel::KERNEL_ARCH::kNPU;
using mindspore::lite::KernelRegistrar;
using mindspore::schema::PrimitiveType_StridedSlice;
namespace mindspore::kernel {
int StridedSliceNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs,
                                     const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter) {
  // Only onnx StridedSlice has 5 inputs, of which the 4th input is axes and the 5th input is strides.
  // The HiAI StridedSlice op cannot remap axes, so the axes tensor must be the
  // identity sequence [0, 1, 2, ...]; otherwise this kernel is unsupported.
  if (inputs.size() == 5) {
    // NOTE(review): the comment above says the 4th input (index 3) holds axes, yet index 4 is read
    // here, while SetNPUInputs treats index 4 as strides — confirm the intended tensor index.
    auto axes_data = inputs[4]->data_c();
    if (axes_data == nullptr) {
      MS_LOG(ERROR) << "Axes tensor of " << name_ << " has no data.";
      return RET_ERROR;
    }
    size_t size = static_cast<size_t>(inputs[4]->shape()[0]);
    std::vector<int> axes(size);
    memcpy(axes.data(), axes_data, sizeof(int) * size);
    // Use size_t for the index to avoid a signed/unsigned comparison with axes.size().
    for (size_t i = 0; i < axes.size(); ++i) {
      if (static_cast<int>(i) != axes[i]) {
        MS_LOG(ERROR) << "Does not support setting axis, so the axis must be continuous.";
        return RET_ERROR;
      }
    }
  }
  return RET_OK;
}
int StridedSliceNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs,
                                        const std::vector<lite::Tensor *> &outputs,
                                        const std::vector<ge::Operator *> &npu_inputs) {
  // Build the HiAI StridedSlice operator and wire its inputs and mask attributes.
  // StridedSliceV2 supports setting axes, but it will cause an endless loop.
  op_ = new (std::nothrow) hiai::op::StridedSlice(name_);
  if (op_ == nullptr) {
    MS_LOG(ERROR) << name_ << " op is nullptr";
    return RET_ERROR;
  }
  op_->set_input_x(*npu_inputs[0]);
  op_->set_input_begin(*npu_inputs[1]);
  op_->set_input_end(*npu_inputs[2]);
  // The strides position of onnx is the 5th, and the others are the 4th.
  const size_t strides_index = (npu_inputs.size() == 5) ? 4 : 3;
  op_->set_input_strides(*npu_inputs[strides_index]);
  // Forward the mask attributes captured from the primitive at construction time.
  op_->set_attr_begin_mask(begin_mask_);
  op_->set_attr_ellipsis_mask(ellipsis_mask_);
  op_->set_attr_end_mask(end_mask_);
  op_->set_attr_shrink_axis_mask(shrink_axis_mask_);
  op_->set_attr_new_axis_mask(new_axis_mask_);
  return RET_OK;
}
// Expose the underlying HiAI operator to the NPU graph builder.
ge::Operator *mindspore::kernel::StridedSliceNPUKernel::GetNPUOp() { return op_; }
StridedSliceNPUKernel::~StridedSliceNPUKernel() {
  // delete on a null pointer is a no-op, so no explicit check is needed.
  delete op_;
  op_ = nullptr;
}
REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_StridedSlice, NPUKernelCreator<StridedSliceNPUKernel>)
} // namespace mindspore::kernel

View File

@ -0,0 +1,55 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_STRIDEDSLICE_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_STRIDEDSLICE_NPU_H_
#include <vector>
#include "src/ops/strided_slice.h"
#include "nnacl/strided_slice.h"
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/all_ops.h"
namespace mindspore::kernel {
// NPU kernel wrapping hiai::op::StridedSlice. Mask attributes are captured
// from the StridedSlice primitive at construction and forwarded to the HiAI
// operator in SetNPUInputs.
class StridedSliceNPUKernel : public NPUKernel {
 public:
  StridedSliceNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
                        const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
                        const mindspore::lite::PrimitiveC *primitive)
      : NPUKernel(parameter, inputs, outputs, ctx, primitive) {
    auto strided_slice = reinterpret_cast<const mindspore::lite::StridedSlice *>(primitive);
    begin_mask_ = strided_slice->GetBeginMask();
    end_mask_ = strided_slice->GetEndMask();
    ellipsis_mask_ = strided_slice->GetEllipsisMask();
    new_axis_mask_ = strided_slice->GetNewAxisMask();
    shrink_axis_mask_ = strided_slice->GetShrinkAxisMask();
  }
  ~StridedSliceNPUKernel() override;

  int IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                OpParameter *opParameter) override;
  int SetNPUInputs(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                   const std::vector<ge::Operator *> &npu_inputs) override;
  ge::Operator *GetNPUOp() override;

 private:
  hiai::op::StridedSlice *op_ = nullptr;
  // Brace-initialize the masks so the members are never read uninitialized,
  // even if a future constructor path skips the primitive extraction above.
  int begin_mask_ = 0;
  int end_mask_ = 0;
  int ellipsis_mask_ = 0;
  int new_axis_mask_ = 0;
  int shrink_axis_mask_ = 0;
};
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_STRIDEDSLICE_NPU_H_

View File

@ -1,57 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/npu/sub_npu.h"
#include "include/graph/op/all_ops.h"
#include "src/kernel_registry.h"
using mindspore::kernel::KERNEL_ARCH::kNPU;
using mindspore::lite::KernelRegistrar;
using mindspore::schema::PrimitiveType_Sub;
namespace mindspore::kernel {
// Reject input pairs whose ranks differ: the NPU Sub op in this kernel does
// not handle broadcasting across tensors of different dimensionality.
int SubNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                            OpParameter *opParameter) {
  if (inputs[0]->shape().size() != inputs[1]->shape().size()) {
    MS_LOG(ERROR) << "For the two inputs, the corresponding dimensions must have the same value."
                  << " shape 1 is:" << inputs[0]->shape() << " shape 2 is:" << inputs[1]->shape();
    return RET_ERROR;
  }
  return RET_OK;
}
// Create the HiAI Sub operator and wire both operands from the converted
// NPU input operators.
int SubNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                               const std::vector<ge::Operator *> &npu_inputs) {
  op_ = new (std::nothrow) hiai::op::Sub(name_);
  if (op_ == nullptr) {
    // Log the failure before bailing out, matching the error-reporting
    // convention of the other NPU kernels (Shape/Softmax/StridedSlice).
    MS_LOG(ERROR) << name_ << " op is nullptr";
    return RET_ERROR;
  }
  op_->set_input_x1(*npu_inputs[0]);
  op_->set_input_x2(*npu_inputs[1]);
  return RET_OK;
}
// Expose the underlying HiAI operator to the NPU graph builder.
ge::Operator *mindspore::kernel::SubNPUKernel::GetNPUOp() { return op_; }
SubNPUKernel::~SubNPUKernel() {
  // delete on a null pointer is a no-op, so no explicit check is needed.
  delete op_;
  op_ = nullptr;
}
REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Sub, NPUKernelCreator<SubNPUKernel>)
} // namespace mindspore::kernel

View File

@ -1,41 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_SUB_NPU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_SUB_NPU_H_
#include <vector>
#include "src/runtime/kernel/npu/npu_kernel.h"
#include "include/graph/op/math_defs.h"
namespace mindspore::kernel {
// NPU kernel wrapping hiai::op::Sub (element-wise subtraction). The HiAI
// operator is created lazily in SetNPUInputs and released in the destructor.
class SubNPUKernel : public NPUKernel {
 public:
  SubNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
               const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
               const mindspore::lite::PrimitiveC *primitive)
      : NPUKernel(parameter, inputs, outputs, ctx, primitive) {}
  ~SubNPUKernel() override;

  // Both inputs must have the same rank; see the implementation for details.
  int IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                OpParameter *opParameter) override;
  int SetNPUInputs(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs,
                   const std::vector<ge::Operator *> &npu_inputs) override;
  ge::Operator *GetNPUOp() override;

 private:
  // Owned HiAI operator; nullptr until SetNPUInputs succeeds.
  hiai::op::Sub *op_ = nullptr;
};
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_SUB_NPU_H_