[MSLITE] simplify base op

ling 2020-12-22 20:41:02 +08:00
parent 5d4a308aca
commit 25320f6fa4
113 changed files with 535 additions and 2753 deletions

View File

@ -24,6 +24,7 @@ typedef struct BatchToSpaceParameter {
OpParameter op_parameter_;
int32_t block_shape_[BATCH_TO_SPACE_BLOCK_SHAPE_SIZE];
int32_t crops_[BATCH_TO_SPACE_CROPS_SIZE];
bool no_crop_;
} BatchToSpaceParameter;
#ifdef __cplusplus

View File

@ -227,6 +227,27 @@ class LiteKernelUtil {
static int SetInput(LiteKernel &kernelMod, const std::vector<lite::Tensor *> &inputs);
};
template <class T>
kernel::LiteKernel *CPUKernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *parameter,
const lite::InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
auto *kernel = new (std::nothrow) T(parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "kernel: " << parameter->name_ << "is nullptr.";
free(parameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != lite::RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << parameter->name_;
delete kernel;
return nullptr;
}
return kernel;
}
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_LITE_KERNEL_H_
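
A minimal sketch of how an op kernel is wired up through the shared template instead of a hand-written creator function. FooCPUKernel and PrimitiveType_Foo are hypothetical names used only for illustration; the registration line mirrors the REG_KERNEL calls that appear later in this commit.

// Hedged sketch (not part of the commit): a hypothetical kernel that satisfies
// the constructor signature CPUKernelCreator<T> expects.
class FooCPUKernel : public LiteKernel {
 public:
  FooCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
               const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
               const mindspore::lite::PrimitiveC *primitive)
      : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
  ~FooCPUKernel() override = default;
  int Init() override { return lite::RET_OK; }  // a non-RET_OK result makes the template delete the kernel
  int ReSize() override { return lite::RET_OK; }
  int Run() override { return lite::RET_OK; }
};
// The template news the kernel (freeing the OpParameter on allocation failure)
// and calls Init(), so registration collapses to a single line:
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Foo, CPUKernelCreator<FooCPUKernel>)

This is the pattern the rest of the commit applies: the per-op creator functions below are deleted and each REG_KERNEL call points at the template instead.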

View File

@ -51,8 +51,12 @@ OpParameter *PopulateBatchToSpaceParameter(const mindspore::lite::PrimitiveC *pr
batch_space_param->block_shape_[i] = block_shape[i];
}
batch_space_param->no_crop_ = true;
for (int i = 0; i < BATCH_TO_SPACE_CROPS_SIZE; ++i) {
batch_space_param->crops_[i] = crops[i];
if (batch_space_param->crops_[i] != 0) {
batch_space_param->no_crop_ = false;
}
}
return reinterpret_cast<OpParameter *>(batch_space_param);
}
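
The crop check that used to live in BatchToSpaceBaseCPUKernel::Init (deleted further below) now runs at populate time. A minimal sketch of the same predicate as a free function, just to spell out what the flag means; the helper name IsNoCrop is illustrative and not part of the commit.

#include <cstdint>

// Hedged sketch (illustration only): no_crop_ is simply "every crop value is zero".
static bool IsNoCrop(const int32_t *crops, int size) {
  for (int i = 0; i < size; ++i) {
    if (crops[i] != 0) {
      return false;  // any non-zero crop means the output must be trimmed
    }
  }
  return true;
}
// With crops = {0, 0, 0, 0} this returns true; with {0, 1, 0, 0} it returns false,
// which is exactly when batch-to-space still has to perform cropping.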

View File

@ -24,7 +24,6 @@ using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_Assert;
namespace mindspore::kernel {
int AssertCPUKernel::Init() { return RET_OK; }
int AssertCPUKernel::ReSize() { return RET_OK; }
@ -41,37 +40,6 @@ int AssertCPUKernel::Run() {
}
}
kernel::LiteKernel *CpuAssertKernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *parameter,
const lite::InnerContext *ctx, const KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
if (parameter == nullptr) {
MS_LOG(ERROR) << "parameter is nullptr";
return nullptr;
}
if (ctx == nullptr) {
MS_LOG(ERROR) << "ctx is nullptr";
free(parameter);
return nullptr;
}
MS_ASSERT(desc.type == PrimitiveType_Assert);
auto *kernel = new (std::nothrow) AssertCPUKernel(parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "Create kernel failed, name: " << parameter->name_;
free(parameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << parameter->name_
<< ", type: " << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(parameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Assert, CpuAssertKernelCreator)
REG_KERNEL(kCPU, kNumberTypeBool, PrimitiveType_Assert, CpuAssertKernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Assert, CPUKernelCreator<AssertCPUKernel>)
REG_KERNEL(kCPU, kNumberTypeBool, PrimitiveType_Assert, CPUKernelCreator<AssertCPUKernel>)
} // namespace mindspore::kernel

View File

@ -20,27 +20,17 @@
#include "src/lite_kernel.h"
namespace mindspore::kernel {
typedef struct AssertParameter {
OpParameter op_parameter_;
} AssertParameter;
class AssertCPUKernel : public LiteKernel {
public:
AssertCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive)
: LiteKernel(parameter, inputs, outputs, ctx, primitive) {
assert_param_ = reinterpret_cast<AssertParameter *>(op_parameter_);
}
: LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
~AssertCPUKernel() override {}
int Init() override;
int ReSize() override;
int Run() override;
private:
AssertParameter *assert_param_ = nullptr;
};
} // namespace mindspore::kernel

View File

@ -1,83 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/base/batch_to_space_base.h"
#include "nnacl/batch_to_space.h"
#include "src/runtime/kernel/arm/fp32/batch_to_space_fp32.h"
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "include/errorcode.h"
#include "include/context.h"
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_FORMAT_ERR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_BatchToSpace;
using mindspore::schema::PrimitiveType_BatchToSpaceND;
namespace mindspore::kernel {
int BatchToSpaceBaseCPUKernel::Init() {
if (in_tensors_.at(0)->format() != schema::Format::Format_NHWC) {
MS_LOG(ERROR) << "batch_to_space only support NHWC now!";
return RET_FORMAT_ERR;
}
BatchToSpaceParameter *param = reinterpret_cast<BatchToSpaceParameter *>(this->op_parameter_);
for (int i = 0; i < BATCH_TO_SPACE_CROPS_SIZE; ++i) {
if (param->crops_[i] != 0) {
no_crop_ = false;
}
}
return RET_OK;
}
int BatchToSpaceBaseCPUKernel::ReSize() {
auto shape = in_tensors_.at(0)->shape();
if (shape.size() != 4) {
MS_LOG(ERROR) << "Unsupport shape size: " << shape.size();
return RET_ERROR;
}
return RET_OK;
}
kernel::LiteKernel *CpuBatchToSpaceFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs,
OpParameter *op_parameter, const lite::InnerContext *ctx,
const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
if (op_parameter == nullptr) {
MS_LOG(ERROR) << "Input op_parameter is nullptr!";
return nullptr;
}
auto *kernel = new (std::nothrow) BatchToSpaceCPUKernel(op_parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new BatchToSpaceCPUKernel fail!";
free(op_parameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << op_parameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(op_parameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_BatchToSpace, CpuBatchToSpaceFp32KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_BatchToSpaceND, CpuBatchToSpaceFp32KernelCreator)
} // namespace mindspore::kernel

View File

@ -1,47 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_BATCH_TO_SPACE_BASE_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_BATCH_TO_SPACE_BASE_H_
#include <vector>
#include "src/lite_kernel.h"
#include "nnacl/concat_parameter.h"
namespace mindspore::kernel {
class BatchToSpaceBaseCPUKernel : public LiteKernel {
public:
BatchToSpaceBaseCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive)
: LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
virtual ~BatchToSpaceBaseCPUKernel() = default;
int Init() override;
int ReSize() override;
int Run() override { return 0; }
bool IsNoCrop() const { return no_crop_; }
private:
bool no_crop_ = false;
};
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_BATCH_TO_SPACE_BASE_H_

View File

@ -1,90 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/base/concat_base.h"
#include <vector>
#include "src/runtime/kernel/arm/fp32/concat_fp32.h"
#include "nnacl/fp32/concat_fp32.h"
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "include/errorcode.h"
#include "include/context.h"
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_Concat;
namespace mindspore::kernel {
int ConcatBaseCPUKernel::Init() { return RET_OK; }
int ConcatBaseCPUKernel::ReSize() {
concat_param_->axis_ =
concat_param_->axis_ >= 0 ? concat_param_->axis_ : in_tensors_.front()->shape().size() + concat_param_->axis_;
return RET_OK;
}
kernel::LiteKernel *CpuConcatInt32KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
const InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
if (opParameter == nullptr) {
MS_LOG(ERROR) << "Input opParameter is nullptr!";
return nullptr;
}
MS_ASSERT(desc.type == schema::PrimitiveType_Concat);
auto *kernel = new (std::nothrow) ConcatCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new ConcatCPUKernel fail!";
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
kernel::LiteKernel *CpuConcatFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
const InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
if (opParameter == nullptr) {
MS_LOG(ERROR) << "Input opParameter is nullptr!";
return nullptr;
}
MS_ASSERT(desc.type == schema::PrimitiveType_Concat);
auto *kernel = new (std::nothrow) ConcatCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new ConcatCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_Concat, CpuConcatInt32KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Concat, CpuConcatFp32KernelCreator)
} // namespace mindspore::kernel

View File

@ -1,52 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_CONCAT_BASE_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_CONCAT_BASE_H_
#include <vector>
#include "src/lite_kernel.h"
#include "nnacl/concat_parameter.h"
#include "src/runtime/kernel/arm/base/layout_transform.h"
using mindspore::lite::InnerContext;
namespace mindspore::kernel {
class ConcatBaseCPUKernel : public LiteKernel {
public:
ConcatBaseCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive)
: LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) {
concat_param_ = reinterpret_cast<ConcatParameter *>(op_parameter_);
}
virtual ~ConcatBaseCPUKernel() = default;
int Init() override;
int ReSize() override;
int Run() override { return 0; }
protected:
const InnerContext *ctx_ = nullptr;
int thread_count_ = 1;
ConcatParameter *concat_param_ = nullptr;
};
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_CONCAT_BASE_H_

View File

@ -31,30 +31,25 @@ int CropBaseCPUKernel::Init() { return RET_OK; }
int CropBaseCPUKernel::ReSize() {
auto *input_tensor = in_tensors_.at(kInputIndex);
auto *out_tensor = out_tensors_.at(kOutputIndex);
auto input_shape = input_tensor->shape();
auto output_shape = out_tensor->shape();
size_t input_dim = input_shape.size();
size_t output_dim = output_shape.size();
crop_para_->in_shape_ = reinterpret_cast<int *>(malloc(input_dim * sizeof(int)));
if (crop_para_->in_shape_ == nullptr) {
MS_LOG(ERROR) << "in_shape_ is nullptr";
return RET_ERROR;
}
memcpy(reinterpret_cast<void *>(const_cast<int *>(crop_para_->in_shape_)), input_shape.data(),
sizeof(int) * input_dim);
auto *out_tensor = out_tensors_.at(kOutputIndex);
auto output_shape = out_tensor->shape();
size_t output_dim = output_shape.size();
memcpy(crop_para_->in_shape_, input_shape.data(), sizeof(int) * input_dim);
crop_para_->out_shape_ = reinterpret_cast<int *>(malloc(output_dim * sizeof(int)));
if (crop_para_->out_shape_ == nullptr) {
MS_LOG(ERROR) << "out_shape_ is nullptr";
return RET_ERROR;
}
memcpy(reinterpret_cast<void *>(const_cast<int *>(crop_para_->out_shape_)), output_shape.data(),
sizeof(int) * output_dim);
memcpy(crop_para_->out_shape_, output_shape.data(), sizeof(int) * output_dim);
MS_ASSERT(input_dim <= CROP_OFFSET_MAX_SIZE);
crop_para_->input_dim_ = input_dim;
@ -81,56 +76,4 @@ void CropBaseCPUKernel::PadOffset(int input_dim, CropParameter *crop_para) {
crop_para->in_offset_[i] = crop_offset;
}
}
kernel::LiteKernel *CpuCropInt32KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
const InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
if (opParameter == nullptr) {
MS_LOG(ERROR) << "Input opParameter is nullptr!";
return nullptr;
}
MS_ASSERT(desc.type == schema::PrimitiveType_Crop);
auto *kernel = new (std::nothrow) CropCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new CropCPUKernel fail!";
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
kernel::LiteKernel *CpuCropFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
const InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
if (opParameter == nullptr) {
MS_LOG(ERROR) << "Input opParameter is nullptr!";
return nullptr;
}
MS_ASSERT(desc.type == schema::PrimitiveType_Crop);
auto *kernel = new (std::nothrow) CropCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new CropCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_Crop, CpuCropInt32KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Crop, CpuCropFp32KernelCreator)
} // namespace mindspore::kernel
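
The simplified CropBaseCPUKernel::ReSize above copies the tensor shapes straight into the plain int* fields of CropParameter. A minimal sketch of that copy pattern as a standalone helper, assuming only the standard headers shown; the name CopyShapeToIntArray is illustrative and not part of the commit.

#include <cstdlib>
#include <cstring>
#include <vector>

// Hedged sketch (illustration only): allocate an int buffer of the shape's size
// and copy the vector's contents into it. The caller owns the buffer and must
// free() it, mirroring how crop_para_->in_shape_ and out_shape_ are filled above.
static int *CopyShapeToIntArray(const std::vector<int> &shape) {
  int *arr = reinterpret_cast<int *>(malloc(shape.size() * sizeof(int)));
  if (arr == nullptr) {
    return nullptr;  // ReSize logs an error and returns RET_ERROR in this case
  }
  memcpy(arr, shape.data(), shape.size() * sizeof(int));
  return arr;
}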

View File

@ -21,15 +21,13 @@
#include "src/lite_kernel.h"
#include "nnacl/crop_parameter.h"
using mindspore::lite::InnerContext;
namespace mindspore::kernel {
class CropBaseCPUKernel : public LiteKernel {
public:
CropBaseCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx,
const std::vector<lite::Tensor *> &outputs, const mindspore::lite::InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive)
: LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) {
: LiteKernel(parameter, inputs, outputs, ctx, primitive) {
crop_para_ = reinterpret_cast<CropParameter *>(op_parameter_);
crop_para_->thread_count_ = op_parameter_->thread_num_;
}
@ -50,7 +48,6 @@ class CropBaseCPUKernel : public LiteKernel {
protected:
CropParameter *crop_para_;
int thread_count_;
void PadOffset(int input_dim, CropParameter *crop_para);
};
} // namespace mindspore::kernel

View File

@ -30,15 +30,12 @@ using mindspore::lite::RET_PARAM_INVALID;
using mindspore::schema::PrimitiveType_DepthToSpace;
namespace mindspore::kernel {
int DepthToSpaceBaseCPUKernel::Init() { return RET_OK; }
int DepthToSpaceBaseCPUKernel::ReSize() {
if (in_tensors_.at(0)->format() != schema::Format::Format_NHWC) {
MS_LOG(ERROR) << "depth_to_space only support NHWC now!";
return RET_FORMAT_ERR;
}
DepthToSpaceParameter *param = reinterpret_cast<DepthToSpaceParameter *>(op_parameter_);
if (param->block_size_ <= 0) {
if (param_->block_size_ <= 0) {
MS_LOG(ERROR) << "Input block_size should > 0!";
return RET_PARAM_INVALID;
}
@ -49,43 +46,14 @@ int DepthToSpaceBaseCPUKernel::ReSize() {
}
int32_t in_strides[DIMENSION_4D];
ComputeStrides(const_cast<int *>(in_tensors_.at(0)->shape().data()), in_strides, shape_size);
param->in_stride_dim0_ = in_strides[0];
param->in_stride_dim1_ = in_strides[1];
param->in_stride_dim2_ = in_strides[2];
param_->in_stride_dim0_ = in_strides[0];
param_->in_stride_dim1_ = in_strides[1];
param_->in_stride_dim2_ = in_strides[2];
int32_t out_strides[DIMENSION_4D];
ComputeStrides(const_cast<int *>(out_tensors_.at(0)->shape().data()), out_strides, shape_size);
param->out_stride_dim0_ = out_strides[0];
param->out_stride_dim1_ = out_strides[1];
param->out_stride_dim2_ = out_strides[2];
param_->out_stride_dim0_ = out_strides[0];
param_->out_stride_dim1_ = out_strides[1];
param_->out_stride_dim2_ = out_strides[2];
return RET_OK;
}
kernel::LiteKernel *CpuDepthToSpaceFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs,
OpParameter *op_parameter, const lite::InnerContext *ctx,
const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
MS_ASSERT(desc.type == schema::PrimitiveType_DepthToSpace);
if (op_parameter == nullptr) {
MS_LOG(ERROR) << "Input op_parameter is nullptr!";
return nullptr;
}
auto *kernel = new (std::nothrow) DepthToSpaceCPUKernel(op_parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new DepthToSpaceCPUKernel fail!";
free(op_parameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << op_parameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(op_parameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_DepthToSpace, CpuDepthToSpaceFp32KernelCreator)
} // namespace mindspore::kernel
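
DepthToSpaceBaseCPUKernel::ReSize above fills the three stride fields from ComputeStrides. For reference, a minimal sketch of the row-major stride computation this relies on, assuming ComputeStrides fills strides[i] with the product of the dimensions after i (which is how the dim0/dim1/dim2 fields are consumed); the function name below is illustrative.

// Hedged sketch (illustration only): row-major strides of a 4D shape.
// For shape {1, 4, 6, 8}: strides = {192, 48, 8, 1}, so
// in_stride_dim0_ = 192, in_stride_dim1_ = 48, in_stride_dim2_ = 8.
static void ComputeStridesSketch(const int *shape, int *strides, int ndim) {
  int stride = 1;
  for (int i = ndim - 1; i >= 0; --i) {
    strides[i] = stride;  // stride of dimension i
    stride *= shape[i];   // accumulate the product of trailing dimensions
  }
}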

View File

@ -18,8 +18,10 @@
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_DEPTH_TO_SPACE_BASE_H_
#include <vector>
#include "include/errorcode.h"
#include "src/lite_kernel.h"
#include "nnacl/depth_to_space.h"
#include "nnacl/depth_to_space_parameter.h"
namespace mindspore::kernel {
class DepthToSpaceBaseCPUKernel : public LiteKernel {
@ -27,15 +29,16 @@ class DepthToSpaceBaseCPUKernel : public LiteKernel {
DepthToSpaceBaseCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive)
: LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
: LiteKernel(parameter, inputs, outputs, ctx, primitive) {
param_ = reinterpret_cast<DepthToSpaceParameter *>(op_parameter_);
}
virtual ~DepthToSpaceBaseCPUKernel() = default;
int Init() override;
int Init() override { return lite::RET_OK; }
int ReSize() override;
int Run() override { return lite::RET_OK; }
int Run() override { return 0; }
protected:
DepthToSpaceParameter *param_ = nullptr;
};
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_DEPTH_TO_SPACE_BASE_H_

View File

@ -26,7 +26,6 @@ using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_DetectionPostProcess;
namespace mindspore::kernel {
void PartialArgSort(const float *scores, int *indexes, int num_to_sort, int num_values) {
std::partial_sort(indexes, indexes + num_to_sort, indexes + num_values, [&scores](const int i, const int j) {
if (scores[i] == scores[j]) {
@ -151,10 +150,10 @@ int DetectionPostProcessBaseCPUKernel::Run() {
if (status != RET_OK) {
return status;
}
auto output_boxes = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
auto output_classes = reinterpret_cast<float *>(out_tensors_.at(1)->MutableData());
auto output_scores = reinterpret_cast<float *>(out_tensors_.at(2)->MutableData());
auto output_num = reinterpret_cast<float *>(out_tensors_.at(3)->MutableData());
auto output_boxes = reinterpret_cast<float *>(out_tensors_.at(0)->data_c());
auto output_classes = reinterpret_cast<float *>(out_tensors_.at(1)->data_c());
auto output_scores = reinterpret_cast<float *>(out_tensors_.at(2)->data_c());
auto output_num = reinterpret_cast<float *>(out_tensors_.at(3)->data_c());
num_boxes_ = in_tensors_.at(0)->shape().at(1);
num_classes_with_bg_ = in_tensors_.at(1)->shape().at(2);
@ -256,5 +255,5 @@ int DetectionPostProcessBaseCPUKernel::Run() {
}
FreeAllocatedBuffer();
return RET_OK;
} // namespace mindspore::kernel
}
} // namespace mindspore::kernel

View File

@ -1,88 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/base/fullconnection_base.h"
#include "src/runtime/kernel/arm/fp32/fullconnection_fp32.h"
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "include/errorcode.h"
#include "include/context.h"
#include "src/runtime/kernel/arm/base/dequant.h"
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_FullConnection;
namespace mindspore::kernel {
int FullconnectionBaseCPUKernel::Init() {
fc_param_->op_parameter_.thread_num_ = thread_count_;
return RET_OK;
}
kernel::LiteKernel *CpuFullConnectionFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs,
OpParameter *opParameter, const lite::InnerContext *ctx,
const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
MS_ASSERT(opParameter != nullptr);
MS_ASSERT(desc.type == schema::PrimitiveType_FullConnection);
auto *weight_tensor = inputs.at(kWeightIndex);
// data of second tensor of fc may be nullptr
auto *restore_data = weight_tensor->data_c();
auto restore_type = weight_tensor->data_type();
bool dequant_flag =
!weight_tensor->quant_params().empty() && weight_tensor->quant_params().front().inited && restore_data != nullptr;
if (dequant_flag) {
auto *dequant_weight = kernel::DequantUtil::DequantWeight(weight_tensor);
if (dequant_weight == nullptr) {
MS_LOG(ERROR) << "dequant data is nullptr.";
return nullptr;
}
weight_tensor->set_data(dequant_weight);
}
auto kernel = new (std::nothrow) FullconnectionCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (!kernel) {
MS_LOG(ERROR) << "kernel is nullptr.";
if (dequant_flag) {
weight_tensor->FreeData();
weight_tensor->set_data(restore_data);
weight_tensor->set_data_type(restore_type);
}
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
if (dequant_flag) {
weight_tensor->FreeData();
weight_tensor->set_data(restore_data);
weight_tensor->set_data_type(restore_type);
}
delete kernel;
return nullptr;
}
if (dequant_flag) {
weight_tensor->FreeData();
weight_tensor->set_data(restore_data);
weight_tensor->set_data_type(restore_type);
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_FullConnection, CpuFullConnectionFp32KernelCreator)
} // namespace mindspore::kernel

View File

@ -1,50 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_FULLCONNECTION_BASE_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_FULLCONNECTION_BASE_H_
#include <vector>
#include "src/lite_kernel.h"
#include "include/context.h"
#include "nnacl/matmul_parameter.h"
using mindspore::lite::InnerContext;
namespace mindspore::kernel {
class FullconnectionBaseCPUKernel : public LiteKernel {
public:
FullconnectionBaseCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive)
: LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) {
fc_param_ = reinterpret_cast<MatMulParameter *>(op_parameter_);
}
~FullconnectionBaseCPUKernel() = default;
int Init() override;
int ReSize() override { return 0; }
int Run() override { return 0; }
protected:
MatMulParameter *fc_param_ = nullptr;
int thread_stride_ = 0;
const InnerContext *ctx_ = nullptr;
int thread_count_ = 1;
};
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_FULLCONNECTION_BASE_H_

View File

@ -1,59 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/base/gelu_base.h"
#include <vector>
#include "src/runtime/kernel/arm/fp32/gelu_fp32.h"
#include "nnacl/fp32/gelu_fp32.h"
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "include/errorcode.h"
#include "include/context.h"
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
// using mindspore::schema::PrimitiveType_GeLU;
namespace mindspore::kernel {
int GeLUBaseCPUKernel::Init() { return RET_OK; }
kernel::LiteKernel *CpuGeLUFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
const InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
if (opParameter == nullptr) {
MS_LOG(ERROR) << "Input opParameter is nullptr!";
return nullptr;
}
// MS_ASSERT(desc.type == schema::PrimitiveType_GeLU);
auto *kernel = new (std::nothrow) GeLUCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new GeLUCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
// REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_GeLU, CpuGeLUFp32KernelCreator)
} // namespace mindspore::kernel

View File

@ -1,50 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_GELU_BASE_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_GELU_BASE_H_
#include <vector>
#include "src/lite_kernel.h"
#include "nnacl/gelu_parameter.h"
#include "src/runtime/kernel/arm/base/layout_transform.h"
using mindspore::lite::InnerContext;
namespace mindspore::kernel {
class GeLUBaseCPUKernel : public LiteKernel {
public:
GeLUBaseCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive)
: LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) {
gelu_param_ = reinterpret_cast<GeLUParameter *>(op_parameter_);
}
virtual ~GeLUBaseCPUKernel() = default;
int Init() override;
int Run() override { return 0; }
protected:
const InnerContext *ctx_ = nullptr;
int thread_count_ = 1;
GeLUParameter *gelu_param_ = nullptr;
};
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_GELU_BASE_H_

View File

@ -21,7 +21,6 @@
#include <arm_neon.h>
#endif
#include "nnacl/pack.h"
#include "schema/ops_generated.h"
#include "src/tensor.h"

View File

@ -1,31 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/base/leaky_relu_base.h"
#include <vector>
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "include/errorcode.h"
#include "include/context.h"
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_LeakyReLU;
namespace mindspore::kernel {
int LeakyReluBaseCPUKernel::Init() { return RET_OK; }
} // namespace mindspore::kernel

View File

@ -1,44 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_LEAKY_RELU_BASE_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_LEAKY_RELU_BASE_H_
#include <vector>
#include "src/lite_kernel.h"
#include "src/runtime/kernel/arm/base/layout_transform.h"
using mindspore::lite::InnerContext;
namespace mindspore::kernel {
class LeakyReluBaseCPUKernel : public LiteKernel {
public:
LeakyReluBaseCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive)
: LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
~LeakyReluBaseCPUKernel() = default;
int Init() override;
int ReSize() override { return 0; }
int Run() override { return 0; }
};
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_LEAKY_RELU_BASE_H_

View File

@ -1,56 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/base/matmul_base.h"
#include "src/runtime/kernel/arm/fp32/matmul_fp32.h"
#include "src/kernel_registry.h"
#include "include/errorcode.h"
#include "include/context.h"
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_MatMul;
namespace mindspore::kernel {
kernel::LiteKernel *CpuMatmulKernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
const lite::InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
MS_ASSERT(opParameter != nullptr);
MS_ASSERT(desc.type == schema::PrimitiveType_Concat);
auto input_tensor = inputs.at(kInputIndex);
auto data_type = input_tensor->data_type();
kernel::LiteKernel *kernel = nullptr;
if (data_type == kNumberTypeFloat32) {
kernel = new (std::nothrow) MatmulCPUKernel(opParameter, inputs, outputs, ctx, primitive);
}
if (kernel == nullptr) {
MS_LOG(ERROR) << "kernel is nullptr.";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_MatMul, CpuMatmulKernelCreator)
} // namespace mindspore::kernel

View File

@ -1,50 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_MATMUL_BASE_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_MATMUL_BASE_H_
#include <vector>
#include "src/lite_kernel.h"
#include "include/context.h"
#include "nnacl/matmul_parameter.h"
using mindspore::lite::InnerContext;
namespace mindspore::kernel {
class MatmulBaseCPUKernel : public LiteKernel {
public:
MatmulBaseCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive)
: LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) {
params_ = reinterpret_cast<MatMulParameter *>(op_parameter_);
}
~MatmulBaseCPUKernel() = default;
int Init() override { return 0; }
int ReSize() override { return 0; }
int Run() override { return 0; }
protected:
MatMulParameter *params_ = nullptr;
int thread_stride_ = 0;
const InnerContext *ctx_ = nullptr;
int thread_count_ = 0;
};
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_MATMUL_BASE_H_

View File

@ -25,7 +25,6 @@ using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_Merge;
namespace mindspore::kernel {
int MergeCPUKernel::FreeInWorkTensor() const {
for (auto &in_tensor : this->in_tensors_) {
MS_ASSERT(in_tensor != nullptr);
@ -131,35 +130,7 @@ int MergeCPUKernel::Run() {
return RET_OK;
}
kernel::LiteKernel *CpuMergeKernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *parameter,
const lite::InnerContext *ctx, const KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
if (parameter == nullptr) {
MS_LOG(ERROR) << "parameter is nullptr";
return nullptr;
}
if (desc.type != PrimitiveType_Merge) {
MS_LOG(ERROR) << "type in desc is not Merge";
free(parameter);
return nullptr;
}
if (ctx == nullptr) {
MS_LOG(ERROR) << "ctx is nullptr";
free(parameter);
return nullptr;
}
auto *kernel = new (std::nothrow) MergeCPUKernel(parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "Create kernel failed, name: " << parameter->name_;
free(parameter);
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Merge, CpuMergeKernelCreator)
REG_KERNEL(kCPU, kNumberTypeBool, PrimitiveType_Merge, CpuMergeKernelCreator)
REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_Merge, CpuMergeKernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Merge, CPUKernelCreator<MergeCPUKernel>)
REG_KERNEL(kCPU, kNumberTypeBool, PrimitiveType_Merge, CPUKernelCreator<MergeCPUKernel>)
REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_Merge, CPUKernelCreator<MergeCPUKernel>)
} // namespace mindspore::kernel

View File

@ -20,29 +20,21 @@
#include "src/lite_kernel.h"
namespace mindspore::kernel {
typedef struct MergeParameter {
OpParameter op_parameter_;
} MergeParameter;
class MergeCPUKernel : public LiteKernel {
public:
MergeCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive)
: LiteKernel(parameter, inputs, outputs, ctx, primitive) {
merge_param_ = reinterpret_cast<MergeParameter *>(op_parameter_);
}
: LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
~MergeCPUKernel() override {}
int FreeInWorkTensor() const override;
bool IsReady(const std::vector<lite::Tensor *> &scope_tensors) override;
int Init() override;
int ReSize() override;
int Run() override;
bool PartialInputReady(int num_begin, int num_end);
int FreeInWorkTensor() const override;
bool IsReady(const std::vector<lite::Tensor *> &scope_tensors) override;
private:
MergeParameter *merge_param_ = nullptr;
bool PartialInputReady(int num_begin, int num_end);
};
} // namespace mindspore::kernel

View File

@ -1,53 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include "src/runtime/kernel/arm/fp32/pad_fp32.h"
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "include/errorcode.h"
#include "include/context.h"
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_Pad;
namespace mindspore::kernel {
kernel::LiteKernel *CpuPadFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
const lite::InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
MS_ASSERT(opParameter != nullptr);
MS_ASSERT(desc.type == schema::PrimitiveType_Pad);
auto *kernel = new (std::nothrow) PadCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new PadCPUKernel failed.";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Pad, CpuPadFp32KernelCreator)
} // namespace mindspore::kernel

View File

@ -115,31 +115,4 @@ int PoolingBaseCPUKernel::ReSize() {
}
return RET_OK;
}
kernel::LiteKernel *CpuPoolingFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
const InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
if (opParameter == nullptr) {
MS_LOG(ERROR) << "Input opParameter is nullptr!";
return nullptr;
}
MS_ASSERT(desc.type == schema::PrimitiveType_Pooling);
auto *kernel = new (std::nothrow) PoolingCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new PoolingCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Pooling, CpuPoolingFp32KernelCreator)
} // namespace mindspore::kernel

View File

@ -1,56 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/base/power_base.h"
#include <vector>
#include "src/runtime/kernel/arm/fp32/power_fp32.h"
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "include/errorcode.h"
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_Power;
namespace mindspore::kernel {
int PowerBaseCPUKernel::Init() { return RET_OK; }
int PowerBaseCPUKernel::ReSize() { return RET_OK; }
kernel::LiteKernel *CpuPowerFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
const lite::InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
MS_ASSERT(opParameter != nullptr);
MS_ASSERT(desc.type == schema::PrimitiveType_Power);
PowerCPUKernel *kernel = new (std::nothrow) PowerCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new PowerCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Power, CpuPowerFp32KernelCreator)
} // namespace mindspore::kernel

View File

@ -1,44 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_POWER_BASE_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_POWER_BASE_H_
#include <vector>
#include "src/lite_kernel.h"
#include "nnacl/power_parameter.h"
namespace mindspore::kernel {
class PowerBaseCPUKernel : public LiteKernel {
public:
PowerBaseCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive)
: LiteKernel(parameter, inputs, outputs, ctx, primitive) {
param_ = reinterpret_cast<PowerParameter *>(op_parameter_);
}
~PowerBaseCPUKernel() = default;
int Init() override;
int ReSize() override;
int Run() override { return 0; }
protected:
PowerParameter *param_;
};
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_POWER_BASE_H_

View File

@ -174,35 +174,6 @@ int PriorBoxCPUKernel::Run() {
return RET_OK;
}
kernel::LiteKernel *CpuPriorBoxKernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter,
const InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
if (op_parameter == nullptr) {
MS_LOG(ERROR) << "Input op_parameter is nullptr!";
return nullptr;
}
if (desc.type != schema::PrimitiveType_PriorBox) {
MS_LOG(ERROR) << "PriorBox invalid desc type " << desc.type;
free(op_parameter);
return nullptr;
}
auto *kernel = new (std::nothrow) PriorBoxCPUKernel(op_parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new PriorBoxCPUKernel fail!";
free(op_parameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << op_parameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(op_parameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_PriorBox, CpuPriorBoxKernelCreator)
REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_PriorBox, CpuPriorBoxKernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_PriorBox, CPUKernelCreator<PriorBoxCPUKernel>)
REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_PriorBox, CPUKernelCreator<PriorBoxCPUKernel>)
} // namespace mindspore::kernel

View File

@ -212,31 +212,7 @@ int QuantDTypeCastCPUKernel::Run() {
return RET_OK;
}
kernel::LiteKernel *CpuQuantDTypeCastFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs,
OpParameter *opParameter, const lite::InnerContext *ctx,
const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
if (opParameter == nullptr) {
MS_LOG(ERROR) << "Input opParameter is nullptr!";
return nullptr;
}
auto *kernel = new (std::nothrow) QuantDTypeCastCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new QuantDTypeCastCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed! name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeUInt8, PrimitiveType_QuantDTypeCast, CpuQuantDTypeCastFp32KernelCreator)
REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_QuantDTypeCast, CpuQuantDTypeCastFp32KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_QuantDTypeCast, CpuQuantDTypeCastFp32KernelCreator)
REG_KERNEL(kCPU, kNumberTypeUInt8, PrimitiveType_QuantDTypeCast, CPUKernelCreator<QuantDTypeCastCPUKernel>)
REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_QuantDTypeCast, CPUKernelCreator<QuantDTypeCastCPUKernel>)
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_QuantDTypeCast, CPUKernelCreator<QuantDTypeCastCPUKernel>)
} // namespace mindspore::kernel

View File

@ -200,28 +200,4 @@ kernel::LiteKernel *CpuReduceFp32KernelCreator(const std::vector<lite::Tensor *>
}
return kernel;
}
kernel::LiteKernel *CpuMeanFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
const lite::InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
auto *kernel = new (std::nothrow) ReduceCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "Reduce new ReduceCPUKernel failed.";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Reduce, CpuReduceFp32KernelCreator)
REG_KERNEL(kCPU, kNumberTypeInt, PrimitiveType_Reduce, CpuReduceFp32KernelCreator)
REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_Reduce, CpuReduceFp32KernelCreator)
} // namespace mindspore::kernel

View File

@ -1,84 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/base/reshape_base.h"
#include <vector>
#include "src/runtime/kernel/arm/fp32/reshape_fp32.h"
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "include/errorcode.h"
#include "include/context.h"
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_Reshape;
namespace mindspore::kernel {
int ReshapeBaseCPUKernel::Init() { return RET_OK; }
kernel::LiteKernel *CpuReshapeInt32KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
const InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
if (opParameter == nullptr) {
MS_LOG(ERROR) << "Input opParameter is nullptr!";
return nullptr;
}
MS_ASSERT(desc.type == schema::PrimitiveType_Reshape);
auto *kernel = new (std::nothrow) ReshapeCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new ReshapeCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
kernel::LiteKernel *CpuReshapeFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
const InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
if (opParameter == nullptr) {
MS_LOG(ERROR) << "Input opParameter is nullptr!";
return nullptr;
}
MS_ASSERT(desc.type == schema::PrimitiveType_Reshape);
auto *kernel = new (std::nothrow) ReshapeCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new ReshapeCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_Reshape, CpuReshapeInt32KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Reshape, CpuReshapeFp32KernelCreator)
} // namespace mindspore::kernel

View File

@ -1,47 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_RESHAPE_BASE_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_RESHAPE_BASE_H_
#include <vector>
#include "src/lite_kernel.h"
#include "nnacl/reshape_parameter.h"
using mindspore::lite::InnerContext;
namespace mindspore::kernel {
class ReshapeBaseCPUKernel : public LiteKernel {
public:
ReshapeBaseCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive)
: LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx) {
reshape_param_ = reinterpret_cast<ReshapeParameter *>(op_parameter_);
}
~ReshapeBaseCPUKernel() = default;
int Init() override;
int ReSize() override { return 0; }
int Run() override { return 0; }
protected:
const InnerContext *ctx_;
ReshapeParameter *reshape_param_;
};
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_RESHAPE_BASE_H_

View File

@ -126,32 +126,4 @@ int ResizeBaseCPUKernel::Init() {
return RET_OK;
}
kernel::LiteKernel *CpuResizeFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
const lite::InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
if (opParameter == nullptr) {
MS_LOG(ERROR) << "Input opParameter is nullptr!";
return nullptr;
}
MS_ASSERT(desc.type == schema::PrimitiveType_Resize);
auto *kernel = new (std::nothrow) ResizeCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new ResizeCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Resize, CpuResizeFp32KernelCreator)
} // namespace mindspore::kernel

View File

@ -54,31 +54,4 @@ int SoftmaxBaseCPUKernel::ReSize() {
softmax_param_->element_size_ = ele_size;
return RET_OK;
}
kernel::LiteKernel *CpuSoftmaxFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
const lite::InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
if (opParameter == nullptr) {
MS_LOG(ERROR) << "Input opParameter is nullptr!";
return nullptr;
}
MS_ASSERT(desc.type == schema::PrimitiveType_SoftMax);
auto *kernel = new (std::nothrow) SoftmaxCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new SoftmaxCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_SoftMax, CpuSoftmaxFp32KernelCreator)
} // namespace mindspore::kernel

View File

@ -75,57 +75,4 @@ int SplitBaseCPUKernel::ReSize() {
thread_n_stride_ = UP_DIV(num_unit_, thread_n_num_);
return RET_OK;
}
kernel::LiteKernel *CpuSplitInt32KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
const InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
if (opParameter == nullptr) {
MS_LOG(ERROR) << "Input opParameter is nullptr!";
return nullptr;
}
MS_ASSERT(desc.type == schema::PrimitiveType_Split);
auto *kernel = new (std::nothrow) SplitCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new SplitCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
delete kernel;
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
return nullptr;
}
return kernel;
}
kernel::LiteKernel *CpuSplitFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
const InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
if (opParameter == nullptr) {
MS_LOG(ERROR) << "Input opParameter is nullptr!";
return nullptr;
}
MS_ASSERT(desc.type == schema::PrimitiveType_Split);
auto *kernel = new (std::nothrow) SplitCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new SplitCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_Split, CpuSplitInt32KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Split, CpuSplitFp32KernelCreator)
} // namespace mindspore::kernel

View File

@ -1,31 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/base/squeeze_base.h"
#include <vector>
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "include/errorcode.h"
#include "include/context.h"
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_Squeeze;
namespace mindspore::kernel {
int SqueezeBaseCPUKernel::Init() { return RET_OK; }
} // namespace mindspore::kernel

View File

@ -1,50 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_SQUEEZE_BASE_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_SQUEEZE_BASE_H_
#include <vector>
#include "src/lite_kernel.h"
#include "nnacl/squeeze_parameter.h"
#include "src/runtime/kernel/arm/base/layout_transform.h"
using mindspore::lite::InnerContext;
namespace mindspore::kernel {
class SqueezeBaseCPUKernel : public LiteKernel {
public:
SqueezeBaseCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive)
: LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) {}
virtual ~SqueezeBaseCPUKernel() = default;
int Init() override;
int ReSize() override { return 0; }
int Run() override { return 0; }
protected:
int *axis_;
const InnerContext *ctx_;
int thread_count_;
};
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_SQUEEZE_BASE_H_

View File

@ -70,7 +70,7 @@ int StridedSliceCPUKernel::Run() {
}
auto output = out_tensors_.at(0);
MS_ASSERT(output);
auto ret = DoStridedSlice(input->MutableData(), output->MutableData(), param_);
auto ret = DoStridedSlice(input->data_c(), output->data_c(), param_);
if (ret != RET_OK) {
MS_LOG(ERROR) << "StridedSlice error error_code[" << ret << "]";
return RET_ERROR;
@ -78,33 +78,7 @@ int StridedSliceCPUKernel::Run() {
return RET_OK;
}
kernel::LiteKernel *CpuStridedSliceKernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
const lite::InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
MS_ASSERT(desc.type == schema::PrimitiveType_StridedSlice);
if (opParameter == nullptr) {
MS_LOG(ERROR) << "opParameter null pointer dereferencing.";
return nullptr;
}
auto *kernel = new (std::nothrow) StridedSliceCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "New kernel fails.";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_StridedSlice, CpuStridedSliceKernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_StridedSlice, CpuStridedSliceKernelCreator)
REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_StridedSlice, CpuStridedSliceKernelCreator)
REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_StridedSlice, CPUKernelCreator<StridedSliceCPUKernel>)
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_StridedSlice, CPUKernelCreator<StridedSliceCPUKernel>)
REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_StridedSlice, CPUKernelCreator<StridedSliceCPUKernel>)
} // namespace mindspore::kernel
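
The three registrations above now share one kernel class across int32, float32 and int8, which works because a strided slice is only an element copy and can proceed by element size rather than element type. A simplified, self-contained 1-D sketch of that idea (not the nnacl implementation):

#include <cstddef>
#include <cstring>
#include <iostream>
#include <vector>

// Copy in[begin], in[begin + stride], ... into out by raw element size,
// so the same routine serves int8, int32 and float32 data alike.
void StridedSlice1D(const void *in, void *out, int begin, int stride, int count, size_t elem_size) {
  const char *src = static_cast<const char *>(in);
  char *dst = static_cast<char *>(out);
  for (int i = 0; i < count; ++i) {
    std::memcpy(dst + i * elem_size, src + (begin + i * stride) * elem_size, elem_size);
  }
}

int main() {
  std::vector<float> in = {0, 1, 2, 3, 4, 5, 6, 7};
  std::vector<float> out(3);
  StridedSlice1D(in.data(), out.data(), 1, 2, 3, sizeof(float));  // picks 1, 3, 5
  for (float v : out) std::cout << v << " ";
  std::cout << std::endl;
  return 0;
}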

View File

@ -91,34 +91,7 @@ int SwitchCPUKernel::Run() {
return RET_OK;
}
kernel::LiteKernel *CpuSwitchKernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *parameter,
const lite::InnerContext *ctx, const KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
if (parameter == nullptr) {
MS_LOG(ERROR) << "parameter is nullptr";
return nullptr;
}
if (desc.type != PrimitiveType_Switch) {
MS_LOG(ERROR) << "type in desc is not Switch";
free(parameter);
return nullptr;
}
if (ctx == nullptr) {
MS_LOG(ERROR) << "ctx is nullptr";
free(parameter);
return nullptr;
}
auto *kernel = new (std::nothrow) SwitchCPUKernel(parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "Create kernel failed, name: " << parameter->name_;
free(parameter);
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Switch, CpuSwitchKernelCreator)
REG_KERNEL(kCPU, kNumberTypeBool, PrimitiveType_Switch, CpuSwitchKernelCreator)
REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_Switch, CpuSwitchKernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Switch, CPUKernelCreator<SwitchCPUKernel>)
REG_KERNEL(kCPU, kNumberTypeBool, PrimitiveType_Switch, CPUKernelCreator<SwitchCPUKernel>)
REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_Switch, CPUKernelCreator<SwitchCPUKernel>)
} // namespace mindspore::kernel

View File

@ -136,27 +136,5 @@ int ActivationFp16CPUKernel::Run() {
return RET_OK;
}
kernel::LiteKernel *CpuActivationFp16KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
const lite::InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
MS_ASSERT(opParameter != nullptr);
MS_ASSERT(desc.type == schema::PrimitiveType_Activation);
auto *kernel = new (std::nothrow) ActivationFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "kernel is nullptr.";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Activation, CpuActivationFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Activation, CPUKernelCreator<ActivationFp16CPUKernel>)
} // namespace mindspore::kernel

View File

@ -184,35 +184,10 @@ void ArithmeticCompareFP16CPUKernel::FreeTmpBuffer() {
}
}
kernel::LiteKernel *CpuArithmeticCompareFp16KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs,
OpParameter *parameter, const lite::InnerContext *ctx,
const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
if (parameter == nullptr) {
MS_LOG(ERROR) << "input parameter is null!";
return nullptr;
}
auto kernel = new (std::nothrow) ArithmeticCompareFP16CPUKernel(parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "Create kernel failed, name: " << parameter->name_;
free(parameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << parameter->name_
<< ", type: " << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(parameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_NotEqual, CpuArithmeticCompareFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Equal, CpuArithmeticCompareFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Less, CpuArithmeticCompareFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_LessEqual, CpuArithmeticCompareFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Greater, CpuArithmeticCompareFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_GreaterEqual, CpuArithmeticCompareFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_NotEqual, CPUKernelCreator<ArithmeticCompareFP16CPUKernel>)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Equal, CPUKernelCreator<ArithmeticCompareFP16CPUKernel>)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Less, CPUKernelCreator<ArithmeticCompareFP16CPUKernel>)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_LessEqual, CPUKernelCreator<ArithmeticCompareFP16CPUKernel>)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Greater, CPUKernelCreator<ArithmeticCompareFP16CPUKernel>)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_GreaterEqual, CPUKernelCreator<ArithmeticCompareFP16CPUKernel>)
} // namespace mindspore::kernel

View File

@ -254,40 +254,16 @@ void ArithmeticFP16CPUKernel::FreeTmpBuffer() {
}
}
kernel::LiteKernel *CpuArithmeticFp16KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *parameter,
const lite::InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
if (parameter == nullptr) {
MS_LOG(ERROR) << "input parameter is null!";
return nullptr;
}
auto kernel = new (std::nothrow) ArithmeticFP16CPUKernel(parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "Create kernel failed, name: " << parameter->name_;
free(parameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << parameter->name_
<< ", type: " << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(parameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Mul, CpuArithmeticFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Add, CpuArithmeticFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Sub, CpuArithmeticFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Div, CpuArithmeticFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_FloorMod, CpuArithmeticFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_FloorDiv, CpuArithmeticFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_LogicalAnd, CpuArithmeticFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_LogicalOr, CpuArithmeticFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Maximum, CpuArithmeticFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Minimum, CpuArithmeticFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Eltwise, CpuArithmeticFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_SquaredDifference, CpuArithmeticFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Mul, CPUKernelCreator<ArithmeticFP16CPUKernel>)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Add, CPUKernelCreator<ArithmeticFP16CPUKernel>)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Sub, CPUKernelCreator<ArithmeticFP16CPUKernel>)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Div, CPUKernelCreator<ArithmeticFP16CPUKernel>)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_FloorMod, CPUKernelCreator<ArithmeticFP16CPUKernel>)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_FloorDiv, CPUKernelCreator<ArithmeticFP16CPUKernel>)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_LogicalAnd, CPUKernelCreator<ArithmeticFP16CPUKernel>)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_LogicalOr, CPUKernelCreator<ArithmeticFP16CPUKernel>)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Maximum, CPUKernelCreator<ArithmeticFP16CPUKernel>)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Minimum, CPUKernelCreator<ArithmeticFP16CPUKernel>)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Eltwise, CPUKernelCreator<ArithmeticFP16CPUKernel>)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_SquaredDifference, CPUKernelCreator<ArithmeticFP16CPUKernel>)
} // namespace mindspore::kernel

View File

@ -105,40 +105,17 @@ int ArithmeticSelfFp16CPUKernel::Run() {
return ret;
}
kernel::LiteKernel *CpuArithmeticSelfFp16KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs,
OpParameter *parameter, const lite::InnerContext *ctx,
const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
auto *kernel = new (std::nothrow) ArithmeticSelfFp16CPUKernel(parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new ArithmeticSelfFp16CPUKernel fail!";
if (parameter != nullptr) {
free(parameter);
}
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << parameter->name_
<< ", type: " << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(parameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Abs, CpuArithmeticSelfFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Cos, CpuArithmeticSelfFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Log, CpuArithmeticSelfFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Square, CpuArithmeticSelfFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Sqrt, CpuArithmeticSelfFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Rsqrt, CpuArithmeticSelfFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Sin, CpuArithmeticSelfFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_LogicalNot, CpuArithmeticSelfFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Floor, CpuArithmeticSelfFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Ceil, CpuArithmeticSelfFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Round, CpuArithmeticSelfFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Neg, CpuArithmeticSelfFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Reciprocal, CpuArithmeticSelfFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Abs, CPUKernelCreator<ArithmeticSelfFp16CPUKernel>)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Cos, CPUKernelCreator<ArithmeticSelfFp16CPUKernel>)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Log, CPUKernelCreator<ArithmeticSelfFp16CPUKernel>)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Square, CPUKernelCreator<ArithmeticSelfFp16CPUKernel>)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Sqrt, CPUKernelCreator<ArithmeticSelfFp16CPUKernel>)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Rsqrt, CPUKernelCreator<ArithmeticSelfFp16CPUKernel>)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Sin, CPUKernelCreator<ArithmeticSelfFp16CPUKernel>)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_LogicalNot, CPUKernelCreator<ArithmeticSelfFp16CPUKernel>)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Floor, CPUKernelCreator<ArithmeticSelfFp16CPUKernel>)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Ceil, CPUKernelCreator<ArithmeticSelfFp16CPUKernel>)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Round, CPUKernelCreator<ArithmeticSelfFp16CPUKernel>)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Neg, CPUKernelCreator<ArithmeticSelfFp16CPUKernel>)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Reciprocal, CPUKernelCreator<ArithmeticSelfFp16CPUKernel>)
} // namespace mindspore::kernel

View File

@ -87,25 +87,5 @@ void BatchnormFp16CPUKernel::FreeInputAndOutput() {
}
}
kernel::LiteKernel *CpuBatchnormFp16KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
const lite::InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
auto *kernel = new (std::nothrow) BatchnormFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new BatchnormFp16CPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_BatchNorm, CpuBatchnormFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_BatchNorm, CPUKernelCreator<BatchnormFp16CPUKernel>)
} // namespace mindspore::kernel

View File

@ -128,39 +128,5 @@ int CastFp16CPUKernel::Run() {
return ParallelLaunch(this->context_->thread_pool_, CastFp16Run, this, op_parameter_->thread_num_);
}
kernel::LiteKernel *CpuCastFp16KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
const lite::InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
if (opParameter == nullptr) {
MS_LOG(ERROR) << "Input opParameter is nullptr!";
return nullptr;
}
if (ctx == nullptr) {
MS_LOG(ERROR) << "Input context is nullptr!";
free(opParameter);
return nullptr;
}
if (ctx->thread_num_ == 0) {
MS_LOG(ERROR) << "context thread num is 0!";
free(opParameter);
return nullptr;
}
auto *kernel = new (std::nothrow) CastFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new CastFp16CPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Cast, CpuCastFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Cast, CPUKernelCreator<CastFp16CPUKernel>)
} // namespace mindspore::kernel

View File

@ -14,12 +14,7 @@
* limitations under the License.
*/
#include "src/runtime/kernel/arm/fp16/concat_fp16.h"
#include "src/runtime/kernel/arm/fp16/common_fp16.h"
#include "src/runtime/kernel/arm/fp32/concat_fp32.h"
#include "nnacl/fp16/concat_fp16.h"
#include "src/kernel_registry.h"
#include "include/errorcode.h"
#include "nnacl/fp16/cast_fp16.h"
using mindspore::kernel::KERNEL_ARCH::kCPU;
using mindspore::lite::KernelRegistrar;
@ -29,18 +24,17 @@ using mindspore::schema::PrimitiveType_Concat;
namespace mindspore::kernel {
int ConcatFp16CPUKernel::Init() {
auto ret = ConcatBaseCPUKernel::Init();
if (ret != RET_OK) {
return ret;
}
if (!InferShapeDone()) {
return RET_OK;
}
return ReSize();
}
int ConcatFp16CPUKernel::ReSize() { return ConcatBaseCPUKernel::ReSize(); }
int ConcatFp16CPUKernel::ReSize() {
concat_param_->axis_ =
concat_param_->axis_ >= 0 ? concat_param_->axis_ : in_tensors_.front()->shape().size() + concat_param_->axis_;
return RET_OK;
}
int ConcatFp16CPUKernel::MallocTmpBuffer() {
for (const auto &in_tensor : in_tensors_) {
@ -64,7 +58,6 @@ int ConcatFp16CPUKernel::MallocTmpBuffer() {
return RET_ERROR;
}
}
return RET_OK;
}
@ -130,28 +123,5 @@ int ConcatFp16CPUKernel::Run() {
return RET_OK;
}
kernel::LiteKernel *CpuConcatFp16KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *parameter,
const InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
if (parameter == nullptr) {
MS_LOG(ERROR) << "Input parameter is nullptr!";
return nullptr;
}
kernel::LiteKernel *kernel = new (std::nothrow) ConcatFp16CPUKernel(parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new ConcatCPUKernel fail!";
free(parameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << parameter->name_
<< ", type: " << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(parameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Concat, CpuConcatFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Concat, CPUKernelCreator<ConcatFp16CPUKernel>)
} // namespace mindspore::kernel
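
The reworked ReSize above folds a negative concat axis into the positive range by adding the input rank, so axis -1 on a 4-D input becomes 3. A small standalone illustration of that normalization (names invented here):

#include <cassert>
#include <cstdio>

// Map an axis in [-rank, rank) onto [0, rank), as ReSize above does.
int NormalizeAxis(int axis, int rank) {
  assert(axis >= -rank && axis < rank);
  return axis >= 0 ? axis : rank + axis;
}

int main() {
  printf("%d %d %d\n", NormalizeAxis(-1, 4), NormalizeAxis(-4, 4), NormalizeAxis(2, 4));  // 3 0 2
  return 0;
}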

View File

@ -18,19 +18,24 @@
#include <arm_neon.h>
#include <vector>
#include "src/lite_kernel.h"
#include "include/context.h"
#include "src/runtime/kernel/arm/base/concat_base.h"
#include "include/errorcode.h"
#include "nnacl/fp16/concat_fp16.h"
#include "nnacl/concat_parameter.h"
#include "nnacl/fp16/cast_fp16.h"
#include "src/lite_kernel.h"
#include "src/runtime/kernel/arm/fp16/common_fp16.h"
using mindspore::lite::InnerContext;
namespace mindspore::kernel {
class ConcatFp16CPUKernel : public ConcatBaseCPUKernel {
class ConcatFp16CPUKernel : public LiteKernel {
public:
ConcatFp16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive)
: ConcatBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
: LiteKernel(parameter, inputs, outputs, ctx, primitive) {
concat_param_ = reinterpret_cast<ConcatParameter *>(op_parameter_);
}
~ConcatFp16CPUKernel() = default;
@ -47,6 +52,7 @@ class ConcatFp16CPUKernel : public ConcatBaseCPUKernel {
private:
std::vector<float16_t *> fp16_inputs_;
float16_t *fp16_output_ = nullptr;
ConcatParameter *concat_param_ = nullptr;
};
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP16_CONCAT_FP16_H_
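
The header above now derives directly from LiteKernel and caches concat_param_ as a ConcatParameter pointer cast from the generic op_parameter_ in the constructor. The cast is valid when the concrete parameter struct embeds the generic parameter as its first member, the usual C-style layout for these parameter structs. A minimal sketch of the pattern with made-up names:

#include <iostream>

// Generic parameter header, embedded as the first member of every concrete parameter.
struct OpParam {
  int type;
  int thread_num;
};

struct MyConcatParam {
  OpParam op;  // must stay first so OpParam* and MyConcatParam* share the same address
  int axis;
};

struct MyKernel {
  explicit MyKernel(OpParam *p) : op_param(p), concat_param(reinterpret_cast<MyConcatParam *>(p)) {}
  OpParam *op_param;
  MyConcatParam *concat_param;  // cached downcast, so ReSize()/Run() need no repeated casts
};

int main() {
  MyConcatParam param{{/*type=*/1, /*thread_num=*/2}, /*axis=*/-1};
  MyKernel kernel(&param.op);
  std::cout << "axis via cached cast: " << kernel.concat_param->axis << std::endl;
  return 0;
}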

View File

@ -14,15 +14,8 @@
* limitations under the License.
*/
#include "src/runtime/kernel/arm/fp16/crop_fp16.h"
#include "include/errorcode.h"
#include "nnacl/crop_parameter.h"
#include "nnacl/fp16/cast_fp16.h"
#include "nnacl/fp16/crop_fp16.h"
#include "src/kernel_registry.h"
#include "src/runtime/kernel/arm/base/crop_base.h"
#include "src/runtime/kernel/arm/fp16/common_fp16.h"
#include "src/runtime/runtime_api.h"
#include "src/kernel_registry.h"
using mindspore::kernel::KERNEL_ARCH::kCPU;
using mindspore::lite::KernelRegistrar;
@ -31,12 +24,7 @@ using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_Crop;
namespace mindspore::kernel {
int CropFp16CPUKernel::Init() {
auto ret = CropBaseCPUKernel::Init();
if (ret != RET_OK) {
return ret;
}
if (!InferShapeDone()) {
return RET_OK;
}
@ -69,13 +57,13 @@ int CropFp16CPUKernel::Run() {
return RET_ERROR;
}
auto ret = ParallelLaunch(this->context_->thread_pool_, CropFp16Run, this, thread_count_);
auto ret = ParallelLaunch(this->context_->thread_pool_, CropFp16Run, this, crop_para_->thread_count_);
if (ret != RET_OK) {
MS_LOG(ERROR) << "ParallelLaunch failed: " << ret;
FreeInputAndOutput();
}
if (out_tensors_.at(kOutputIndex)->data_type() == kNumberTypeFloat32) {
Float16ToFloat32(output_ptr_, reinterpret_cast<float *>(out_tensors_.at(kOutputIndex)->MutableData()),
Float16ToFloat32(output_ptr_, reinterpret_cast<float *>(out_tensors_.at(kOutputIndex)->data_c()),
out_tensors_.at(kOutputIndex)->ElementsNum());
}
FreeInputAndOutput();
@ -93,29 +81,5 @@ void CropFp16CPUKernel::FreeInputAndOutput() {
}
}
kernel::LiteKernel *CpuCropFp16KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
const InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
if (opParameter == nullptr) {
MS_LOG(ERROR) << "Input opParameter is nullptr!";
return nullptr;
}
MS_ASSERT(desc.type == schema::PrimitiveType_Crop);
auto *kernel = new (std::nothrow) CropFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new CropFp16CPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Crop, CpuCropFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Crop, CPUKernelCreator<CropFp16CPUKernel>)
} // namespace mindspore::kernel

View File

@ -18,11 +18,14 @@
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP16_CROP_H_
#include <arm_neon.h>
#include <vector>
#include "include/errorcode.h"
#include "nnacl/crop_parameter.h"
#include "nnacl/fp16/cast_fp16.h"
#include "nnacl/fp16/crop_fp16.h"
#include "src/lite_kernel.h"
#include "src/runtime/kernel/arm/base/crop_base.h"
#include "src/runtime/kernel/arm/fp16/common_fp16.h"
namespace mindspore::kernel {
class CropFp16CPUKernel : public CropBaseCPUKernel {
@ -30,10 +33,7 @@ class CropFp16CPUKernel : public CropBaseCPUKernel {
CropFp16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive)
: CropBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {
crop_para_ = reinterpret_cast<CropParameter *>(op_parameter_);
crop_para_->thread_count_ = op_parameter_->thread_num_;
}
: CropBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
~CropFp16CPUKernel() override = default;
int Init() override;
@ -44,7 +44,6 @@ class CropFp16CPUKernel : public CropBaseCPUKernel {
private:
float16_t *input_ptr_ = nullptr;
float16_t *output_ptr_ = nullptr;
CropParameter *crop_para_;
void FreeInputAndOutput();
};
} // namespace mindspore::kernel

View File

@ -15,12 +15,8 @@
*/
#include "src/runtime/kernel/arm/fp16/fullconnection_fp16.h"
#include "nnacl/fp16/matmul_fp16.h"
#include "nnacl/fp16/cast_fp16.h"
#include "src/runtime/runtime_api.h"
#include "include/errorcode.h"
#include "src/kernel_registry.h"
#include "src/runtime/kernel/arm/base/dequant.h"
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
@ -34,19 +30,19 @@ FullconnectionFP16CPUKernel::~FullconnectionFP16CPUKernel() { FreeTmpBuffer(); }
void FullconnectionFP16CPUKernel::FreeTmpBuffer() {
if (a_pack_ptr_ != nullptr) {
ctx_->allocator->Free(a_pack_ptr_);
context_->allocator->Free(a_pack_ptr_);
a_pack_ptr_ = nullptr;
}
if (b_pack_ptr_ != nullptr) {
ctx_->allocator->Free(b_pack_ptr_);
context_->allocator->Free(b_pack_ptr_);
b_pack_ptr_ = nullptr;
}
if (bias_ptr_ != nullptr) {
ctx_->allocator->Free(bias_ptr_);
context_->allocator->Free(bias_ptr_);
bias_ptr_ = nullptr;
}
if (output_fp16_ != nullptr) {
ctx_->allocator->Free(output_fp16_);
context_->allocator->Free(output_fp16_);
output_fp16_ = nullptr;
}
}
@ -60,7 +56,7 @@ int FullconnectionFP16CPUKernel::ReSize() {
fc_param_->deep_ = (in_tensors_.at(1)->shape()).at(1);
fc_param_->row_16_ = UP_ROUND(fc_param_->row_, C16NUM);
fc_param_->col_8_ = UP_ROUND(fc_param_->col_, C8NUM);
thread_count_ = MSMIN(thread_count_, UP_DIV(fc_param_->col_, C8NUM));
thread_count_ = MSMIN(op_parameter_->thread_num_, UP_DIV(fc_param_->col_, C8NUM));
thread_stride_ = UP_DIV(UP_DIV(fc_param_->col_, C8NUM), thread_count_) * C8NUM;
if (row == 1) is_vector_input_ = true;
@ -74,7 +70,7 @@ int FullconnectionFP16CPUKernel::ReSize() {
b_pack_col = fc_param_->col_8_;
}
a_pack_ptr_ =
reinterpret_cast<float16_t *>(ctx_->allocator->Malloc(a_pack_row * fc_param_->deep_ * sizeof(float16_t)));
reinterpret_cast<float16_t *>(context_->allocator->Malloc(a_pack_row * fc_param_->deep_ * sizeof(float16_t)));
if (a_pack_ptr_ == nullptr) {
FreeTmpBuffer();
return RET_MEMORY_FAILED;
@ -82,7 +78,7 @@ int FullconnectionFP16CPUKernel::ReSize() {
memset(a_pack_ptr_, 0, a_pack_row * fc_param_->deep_ * sizeof(float16_t));
b_pack_ptr_ =
reinterpret_cast<float16_t *>(ctx_->allocator->Malloc(b_pack_col * fc_param_->deep_ * sizeof(float16_t)));
reinterpret_cast<float16_t *>(context_->allocator->Malloc(b_pack_col * fc_param_->deep_ * sizeof(float16_t)));
if (b_pack_ptr_ == nullptr) {
FreeTmpBuffer();
return RET_MEMORY_FAILED;
@ -110,7 +106,7 @@ int FullconnectionFP16CPUKernel::ReSize() {
}
if (in_tensors_.size() == 3) {
bias_ptr_ = reinterpret_cast<float16_t *>(ctx_->allocator->Malloc(b_pack_col * sizeof(float16_t)));
bias_ptr_ = reinterpret_cast<float16_t *>(context_->allocator->Malloc(b_pack_col * sizeof(float16_t)));
if (bias_ptr_ == nullptr) {
FreeTmpBuffer();
return RET_MEMORY_FAILED;
@ -121,7 +117,7 @@ int FullconnectionFP16CPUKernel::ReSize() {
if (out_tensors_.at(0)->data_type() == kNumberTypeFloat32) {
output_fp16_ =
reinterpret_cast<float16_t *>(ctx_->allocator->Malloc(fc_param_->row_ * fc_param_->col_ * sizeof(float16_t)));
reinterpret_cast<float16_t *>(context_->allocator->Malloc(fc_param_->row_ * fc_param_->col_ * sizeof(float16_t)));
if (output_fp16_ == nullptr) {
FreeTmpBuffer();
return RET_MEMORY_FAILED;

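The ReSize changes above derive the thread split for the fp16 fully connected kernel from the packed column count: thread_count_ is capped by the number of C8NUM-wide column blocks, and thread_stride_ is the per-thread span rounded up to whole blocks. A worked standalone version of that arithmetic, spelling out UP_DIV and UP_ROUND as the usual ceiling helpers (an assumption about the nnacl macros):

#include <algorithm>
#include <cstdio>

constexpr int C8NUM = 8;
int UpDiv(int x, int y) { return (x + y - 1) / y; }    // ceiling division
int UpRound(int x, int y) { return UpDiv(x, y) * y; }  // round up to a multiple of y

int main() {
  int col = 100, thread_num = 4;
  int col_8 = UpRound(col, C8NUM);                          // 104 packed columns
  int blocks = UpDiv(col, C8NUM);                           // 13 blocks of 8 columns
  int thread_count = std::min(thread_num, blocks);          // 4 worker tasks
  int thread_stride = UpDiv(blocks, thread_count) * C8NUM;  // 4 blocks, i.e. 32 columns per task
  printf("col_8=%d blocks=%d threads=%d stride=%d\n", col_8, blocks, thread_count, thread_stride);
  return 0;
}
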
View File

@ -17,21 +17,24 @@
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP16_FULLCONNECTION_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP16_FULLCONNECTION_H_
#ifdef ENABLE_NEON
#include <arm_neon.h>
#endif
#include <vector>
#include "src/lite_kernel.h"
#include "include/errorcode.h"
#include "nnacl/matmul_parameter.h"
#include "src/runtime/kernel/arm/base/fullconnection_base.h"
#include "nnacl/fp16/matmul_fp16.h"
#include "nnacl/fp16/cast_fp16.h"
#include "src/lite_kernel.h"
#include "src/runtime/kernel/arm/base/dequant.h"
namespace mindspore::kernel {
class FullconnectionFP16CPUKernel : public FullconnectionBaseCPUKernel {
class FullconnectionFP16CPUKernel : public LiteKernel {
public:
explicit FullconnectionFP16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive)
: FullconnectionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
: LiteKernel(parameter, inputs, outputs, ctx, primitive) {
fc_param_ = reinterpret_cast<MatMulParameter *>(op_parameter_);
}
~FullconnectionFP16CPUKernel() override;
int Init() override;
int ReSize() override;
@ -46,6 +49,7 @@ class FullconnectionFP16CPUKernel : public FullconnectionBaseCPUKernel {
void FreeTmpBuffer();
private:
MatMulParameter *fc_param_ = nullptr;
float16_t *a_pack_ptr_ = nullptr;
float16_t *b_pack_ptr_ = nullptr;
float16_t *bias_ptr_ = nullptr;
@ -54,6 +58,8 @@ class FullconnectionFP16CPUKernel : public FullconnectionBaseCPUKernel {
float16_t *a_ptr_ = nullptr;
float16_t *b_ptr_ = nullptr;
bool is_vector_input_ = false;
int thread_count_ = 1;
int thread_stride_ = 0;
};
} // namespace mindspore::kernel

View File

@ -107,7 +107,7 @@ int MatmulFP16CPUKernel::MallocMatrixBBuffer() {
return RET_MEMORY_FAILED;
}
memset(b_pack_ptr_, 0, params_->batch * params_->col_8_ * params_->deep_ * sizeof(float16_t));
thread_count_ = MSMIN(thread_count_, UP_DIV(params_->col_, C8NUM));
thread_count_ = MSMIN(op_parameter_->thread_num_, UP_DIV(params_->col_, C8NUM));
thread_stride_ = UP_DIV(UP_DIV(params_->col_, C8NUM), thread_count_) * C8NUM;
return RET_OK;
}
@ -228,7 +228,7 @@ int MatmulFP16CPUKernel::Init() {
int MatmulFP16CPUKernel::MallocFp16Output() {
if (out_tensors_[0]->data_type() == kNumberTypeFloat32) {
output_ptr_ = reinterpret_cast<float16_t *>(
ctx_->allocator->Malloc(params_->batch * params_->row_ * params_->col_ * sizeof(float16_t)));
context_->allocator->Malloc(params_->batch * params_->row_ * params_->col_ * sizeof(float16_t)));
if (output_ptr_ == nullptr) {
MS_LOG(ERROR) << "malloc output_ptr_ failed.";
return RET_MEMORY_FAILED;
@ -313,7 +313,7 @@ int MatmulFP16CPUKernel::Run() {
auto size = out_tensor->ElementsNum();
auto out_tensor_data = reinterpret_cast<float *>(out_tensor->data_c());
Float16ToFloat32(output_ptr_, out_tensor_data, size);
ctx_->allocator->Free(output_ptr_);
context_->allocator->Free(output_ptr_);
}
if (!params_->a_const_) {
context_->allocator->Free(a_pack_ptr_);

View File

@ -23,15 +23,16 @@
#include <vector>
#include "src/lite_kernel.h"
#include "nnacl/matmul_parameter.h"
#include "src/runtime/kernel/arm/base/matmul_base.h"
namespace mindspore::kernel {
class MatmulFP16CPUKernel : public MatmulBaseCPUKernel {
class MatmulFP16CPUKernel : public LiteKernel {
public:
explicit MatmulFP16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive)
: MatmulBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
: LiteKernel(parameter, inputs, outputs, ctx, primitive) {
params_ = reinterpret_cast<MatMulParameter *>(op_parameter_);
}
~MatmulFP16CPUKernel() override;
int Init() override;
int ReSize() override;
@ -50,6 +51,7 @@ class MatmulFP16CPUKernel : public MatmulBaseCPUKernel {
void FreeTmpBuffer();
private:
MatMulParameter *params_ = nullptr;
float16_t *a_pack_ptr_ = nullptr;
float16_t *b_pack_ptr_ = nullptr;
float16_t *bias_ptr_ = nullptr;
@ -57,6 +59,8 @@ class MatmulFP16CPUKernel : public MatmulBaseCPUKernel {
float16_t *current_a_ = nullptr;
float16_t *current_b_ = nullptr;
float16_t *current_c_ = nullptr;
int thread_stride_ = 0;
int thread_count_ = 0;
};
} // namespace mindspore::kernel

View File

@ -91,24 +91,5 @@ void PadFp16CPUKernel::FreeInputAndOutput() {
}
}
kernel::LiteKernel *CpuPadFp16KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
const lite::InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
auto *kernel = new (std::nothrow) PadFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new PadFp16CPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Pad, CpuPadFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Pad, CPUKernelCreator<PadFp16CPUKernel>)
} // namespace mindspore::kernel

View File

@ -112,30 +112,5 @@ int PoolingFp16CPUKernel::Run() {
return RET_OK;
}
kernel::LiteKernel *CpuPoolingFp16KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
const InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
if (opParameter == nullptr) {
MS_LOG(ERROR) << "Input opParameter is nullptr!";
return nullptr;
}
MS_ASSERT(desc.type == schema::PrimitiveType_Pooling);
auto *kernel = new (std::nothrow) PoolingFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new PoolingCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Pooling, CpuPoolingFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Pooling, CPUKernelCreator<PoolingFp16CPUKernel>)
} // namespace mindspore::kernel

View File

@ -197,5 +197,5 @@ kernel::LiteKernel *CpuQuantDTypeCastFp16KernelCreator(const std::vector<lite::T
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_QuantDTypeCast, CpuQuantDTypeCastFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_QuantDTypeCast, CPUKernelCreator<QuantDTypeCastFp16CPUKernel>)
} // namespace mindspore::kernel

View File

@ -157,54 +157,5 @@ int ReduceFp16CPUKernel::MallocTmpBuffer() {
return RET_OK;
}
kernel::LiteKernel *CpuReduceFp16KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
const lite::InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
MS_ASSERT(opParameter != nullptr);
MS_ASSERT(desc.type == schema::PrimitiveType_Reduce);
if (opParameter == nullptr) {
MS_LOG(ERROR) << "Reduce opParameter nullptr";
return nullptr;
}
if (desc.type != schema::PrimitiveType_Reduce) {
MS_LOG(ERROR) << "Reduce op desc.type should be PrimitiveType_Reduce, got " << desc.type;
return nullptr;
}
auto *kernel = new (std::nothrow) ReduceFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "Reduce new ReduceCPUKernel failed.";
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
kernel::LiteKernel *CpuMeanFp16KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
const lite::InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
auto *kernel = new (std::nothrow) ReduceFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "Reduce new ReduceCPUKernel failed.";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Reduce, CpuReduceFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Reduce, CPUKernelCreator<ReduceFp16CPUKernel>)
} // namespace mindspore::kernel

View File

@ -69,30 +69,5 @@ int ReshapeFp16CPUKernel::Run() {
return RET_OK;
}
kernel::LiteKernel *CpuReshapeFp16KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
const InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
if (opParameter == nullptr) {
MS_LOG(ERROR) << "Input opParameter is nullptr!";
return nullptr;
}
MS_ASSERT(desc.type == schema::PrimitiveType_Reshape);
auto *kernel = new (std::nothrow) ReshapeFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new ReshapeFp16CPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Reshape, CpuReshapeFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Reshape, CPUKernelCreator<ReshapeFp16CPUKernel>)
} // namespace mindspore::kernel

View File

@ -181,31 +181,5 @@ void ScaleFp16CPUKernel::FreeTmpBuffer() {
}
}
kernel::LiteKernel *CpuScaleFp16KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
const lite::InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
MS_ASSERT(desc.type == schema::PrimitiveType_Scale);
if (opParameter == nullptr) {
MS_LOG(ERROR) << "opParameter is nullptr";
return nullptr;
}
auto *kernel = new (std::nothrow) ScaleFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "New kernel fails.";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Scale, CpuScaleFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Scale, CPUKernelCreator<ScaleFp16CPUKernel>)
} // namespace mindspore::kernel

View File

@ -65,25 +65,5 @@ void SliceFp16CPUKernel::FreeInputAndOutput() {
}
}
kernel::LiteKernel *CpuSliceFp16KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
const lite::InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
auto *kernel = new (std::nothrow) SliceFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new SliceFp16CPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Slice, CpuSliceFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Slice, CPUKernelCreator<SliceFp16CPUKernel>)
} // namespace mindspore::kernel

View File

@ -119,31 +119,5 @@ int SoftmaxFp16CPUKernel::Run() {
return RET_OK;
}
kernel::LiteKernel *CpuSoftmaxFp16KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
const lite::InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
if (opParameter == nullptr) {
MS_LOG(ERROR) << "Input opParameter is nullptr!";
return nullptr;
}
MS_ASSERT(desc.type == schema::PrimitiveType_SoftMax);
auto *kernel = new (std::nothrow) SoftmaxFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new SoftmaxFp16CPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_SoftMax, CpuSoftmaxFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_SoftMax, CPUKernelCreator<SoftmaxFp16CPUKernel>)
} // namespace mindspore::kernel

View File

@ -116,29 +116,5 @@ void SplitFp16CPUKernel::FreeInputAndOutput() {
}
}
kernel::LiteKernel *CpuSplitFp16KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
const InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
if (opParameter == nullptr) {
MS_LOG(ERROR) << "Input opParameter is nullptr!";
return nullptr;
}
MS_ASSERT(desc.type == schema::PrimitiveType_Split);
auto *kernel = new (std::nothrow) SplitFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new SplitFp16CPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Split, CpuSplitFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Split, CPUKernelCreator<SplitFp16CPUKernel>)
} // namespace mindspore::kernel

View File

@ -100,31 +100,5 @@ int StackFp16CPUKernel::Run() {
return RET_OK;
}
kernel::LiteKernel *CpuStackFp16KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter,
const lite::InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
if (op_parameter == nullptr) {
MS_LOG(ERROR) << "Input op_parameter is nullptr!";
return nullptr;
}
MS_ASSERT(desc.type == schema::PrimitiveType_Stack);
auto *kernel = new (std::nothrow) StackFp16CPUKernel(op_parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new StackFp16CPUKernel fail!";
free(op_parameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << op_parameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(op_parameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Stack, CpuStackFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Stack, CPUKernelCreator<StackFp16CPUKernel>)
} // namespace mindspore::kernel

View File

@ -88,31 +88,5 @@ int TransposeFp16CPUKernel::Run() {
return ret;
}
kernel::LiteKernel *CpuTransposeFp16KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
const lite::InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
MS_ASSERT(desc.type == schema::PrimitiveType_Transpose);
if (opParameter == nullptr) {
MS_LOG(ERROR) << "desc type is not Transpose";
return nullptr;
}
auto *kernel = new (std::nothrow) TransposeFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "New kernel fails.";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Transpose, CpuTransposeFp16KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Transpose, CPUKernelCreator<TransposeFp16CPUKernel>)
} // namespace mindspore::kernel

View File

@ -14,39 +14,37 @@
* limitations under the License.
*/
#include "src/runtime/kernel/arm/fp32/batch_to_space_fp32.h"
#include <vector>
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "nnacl/batch_to_space.h"
#include "include/errorcode.h"
using mindspore::lite::RET_OK;
using mindspore::lite::KernelRegistrar;
using mindspore::schema::PrimitiveType_BatchToSpace;
using mindspore::schema::PrimitiveType_BatchToSpaceND;
namespace mindspore::kernel {
int BatchToSpaceCPUKernel::Init() {
auto ret = BatchToSpaceBaseCPUKernel::Init();
if (ret != RET_OK) {
return ret;
}
MS_ASSERT(in_tensors_.at(0)->format() == schema::Format::Format_NHWC);
if (!InferShapeDone()) {
return RET_OK;
return lite::RET_OK;
}
return ReSize();
}
int BatchToSpaceCPUKernel::ReSize() { return BatchToSpaceBaseCPUKernel::ReSize(); }
int BatchToSpaceCPUKernel::ReSize() {
MS_ASSERT(in_tensors_.at(0)->shape().size() == 4);
return lite::RET_OK;
}
int BatchToSpaceCPUKernel::Run() {
auto input = in_tensors_[0];
auto output = out_tensors_[0];
const float *input_data = reinterpret_cast<const float *>(input->MutableData());
const float *input_data = reinterpret_cast<const float *>(input->data_c());
float *output_data = reinterpret_cast<float *>(output->MutableData());
auto in_shape = input->shape();
auto out_shape = output->shape();
BatchToSpaceParameter *param = reinterpret_cast<BatchToSpaceParameter *>(this->op_parameter_);
if (IsNoCrop()) {
if (param->no_crop_) {
BatchToSpaceNoCropForNHWC(input_data, output_data, in_shape.data(), out_shape[0], param->block_shape_,
sizeof(float));
} else {
@ -54,6 +52,9 @@ int BatchToSpaceCPUKernel::Run() {
sizeof(float));
}
return RET_OK;
return lite::RET_OK;
}
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_BatchToSpace, CPUKernelCreator<BatchToSpaceCPUKernel>)
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_BatchToSpaceND, CPUKernelCreator<BatchToSpaceCPUKernel>)
} // namespace mindspore::kernel
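
Run above takes the BatchToSpaceNoCropForNHWC fast path when the precomputed no_crop_ flag is set and the crop-aware path otherwise. For the shape side, the usual BatchToSpace definition divides the batch by the block size and scales the spatial dimensions before subtracting the crops; a small sketch of that arithmetic (the crop ordering used here is an assumption):

#include <array>
#include <cstdio>

// NHWC BatchToSpace output shape under the usual definition; shape math only,
// not the nnacl data movement. crops = {top, bottom, left, right} by assumption.
std::array<int, 4> BatchToSpaceOutShape(const std::array<int, 4> &in, const int block[2], const int crops[4]) {
  return {in[0] / (block[0] * block[1]),
          in[1] * block[0] - crops[0] - crops[1],
          in[2] * block[1] - crops[2] - crops[3],
          in[3]};
}

int main() {
  std::array<int, 4> in = {4, 2, 2, 3};
  int block[2] = {2, 2};
  int no_crop[4] = {0, 0, 0, 0};
  int crop[4] = {0, 1, 0, 1};
  auto a = BatchToSpaceOutShape(in, block, no_crop);  // {1, 4, 4, 3}
  auto b = BatchToSpaceOutShape(in, block, crop);     // {1, 3, 3, 3}
  printf("%d %d %d %d / %d %d %d %d\n", a[0], a[1], a[2], a[3], b[0], b[1], b[2], b[3]);
  return 0;
}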

View File

@ -15,17 +15,19 @@
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_BATCH_TO_SPACE_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_BATCH_TO_SPACE_H_
#include <vector>
#include "src/runtime/kernel/arm/base/batch_to_space_base.h"
#include "include/errorcode.h"
#include "nnacl/batch_to_space.h"
#include "src/lite_kernel.h"
namespace mindspore::kernel {
class BatchToSpaceCPUKernel : public BatchToSpaceBaseCPUKernel {
class BatchToSpaceCPUKernel : public LiteKernel {
public:
BatchToSpaceCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive)
: BatchToSpaceBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
: LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
~BatchToSpaceCPUKernel() = default;
int Init() override;

View File

@ -15,13 +15,8 @@
*/
#include "src/runtime/kernel/arm/fp32/concat_fp32.h"
#include <vector>
#include "nnacl/fp32/concat_fp32.h"
#include "src/kernel_registry.h"
#include "schema/model_generated.h"
#include "include/errorcode.h"
#include "src/runtime/runtime_api.h"
#include "src/runtime/thread_pool.h"
using mindspore::kernel::KERNEL_ARCH::kCPU;
using mindspore::lite::KernelRegistrar;
@ -31,18 +26,17 @@ using mindspore::schema::PrimitiveType_Concat;
namespace mindspore::kernel {
int ConcatCPUKernel::Init() {
auto ret = ConcatBaseCPUKernel::Init();
if (ret != RET_OK) {
return ret;
}
if (!InferShapeDone()) {
return RET_OK;
}
return ReSize();
}
int ConcatCPUKernel::ReSize() { return ConcatBaseCPUKernel::ReSize(); }
int ConcatCPUKernel::ReSize() {
concat_param_->axis_ =
concat_param_->axis_ >= 0 ? concat_param_->axis_ : in_tensors_.front()->shape().size() + concat_param_->axis_;
return RET_OK;
}
int ConcatCPUKernel::DoConcat(int task_id) {
auto input_num = in_tensors_.size();
@ -60,7 +54,7 @@ int ConcatCPUKernel::DoConcat(int task_id) {
auto output_addr = out_tensors_.at(0)->MutableData();
Concat(inputs_addr.data(), input_num, concat_param_->axis_, inputs_output_shape.data(), output_shape.size(),
output_addr, task_id, thread_count_);
output_addr, task_id, op_parameter_->thread_num_);
return RET_OK;
}
@ -75,7 +69,10 @@ int ConcatsRun(void *cdata, int task_id) {
}
int ConcatCPUKernel::Run() {
int error_code = ParallelLaunch(this->context_->thread_pool_, ConcatsRun, this, thread_count_);
int error_code = ParallelLaunch(this->context_->thread_pool_, ConcatsRun, this, op_parameter_->thread_num_);
return error_code;
}
REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_Concat, CPUKernelCreator<ConcatCPUKernel>)
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Concat, CPUKernelCreator<ConcatCPUKernel>)
} // namespace mindspore::kernel
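
ConcatsRun above is handed to ParallelLaunch together with op_parameter_->thread_num_, and DoConcat partitions its work by the task_id it receives; the thread pool presumably invokes the callback once per task_id in that range. A self-contained sketch of this task-per-id pattern using std::thread (the real thread pool API is not reproduced here):

#include <cstdio>
#include <thread>
#include <vector>

// Run task(task_id) for task_id in [0, thread_num), one thread per task,
// a simplified stand-in for ParallelLaunch(thread_pool, func, cdata, thread_num).
template <class F>
void SimpleParallelLaunch(F task, int thread_num) {
  std::vector<std::thread> workers;
  for (int id = 0; id < thread_num; ++id) {
    workers.emplace_back(task, id);
  }
  for (auto &w : workers) {
    w.join();
  }
}

int main() {
  const int thread_num = 4, total = 10;
  std::vector<int> out(total, 0);
  SimpleParallelLaunch(
      [&](int task_id) {
        // Strided partition: task_id handles elements task_id, task_id + thread_num, ...
        for (int i = task_id; i < total; i += thread_num) out[i] = i * i;
      },
      thread_num);
  for (int v : out) printf("%d ", v);
  printf("\n");
  return 0;
}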

View File

@ -18,19 +18,24 @@
#include <vector>
#include "src/lite_kernel.h"
#include "nnacl/fp32/concat_fp32.h"
#include "nnacl/concat_parameter.h"
#include "include/errorcode.h"
#include "src/runtime/runtime_api.h"
#include "src/runtime/thread_pool.h"
#include "include/context.h"
#include "src/runtime/kernel/arm/base/concat_base.h"
using mindspore::lite::InnerContext;
namespace mindspore::kernel {
class ConcatCPUKernel : public ConcatBaseCPUKernel {
class ConcatCPUKernel : public LiteKernel {
public:
ConcatCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive)
: ConcatBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
: LiteKernel(parameter, inputs, outputs, ctx, primitive) {
concat_param_ = reinterpret_cast<ConcatParameter *>(op_parameter_);
}
~ConcatCPUKernel() = default;
@ -38,6 +43,9 @@ class ConcatCPUKernel : public ConcatBaseCPUKernel {
int ReSize() override;
int DoConcat(int task_id);
int Run() override;
private:
ConcatParameter *concat_param_ = nullptr;
};
} // namespace mindspore::kernel

View File

@ -16,14 +16,10 @@
#include "src/runtime/kernel/arm/fp32/crop_fp32.h"
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "nnacl/fp32/crop_fp32.h"
#include "nnacl/crop_parameter.h"
#include "include/errorcode.h"
#include "src/runtime/runtime_api.h"
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_FORMAT_ERR;
using mindspore::lite::RET_NULL_PTR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_Crop;
@ -40,34 +36,42 @@ int CropLaunch(void *cdata, int task_id) {
}
} // namespace
int CropCPUKernel::Init() { return RET_OK; }
int CropCPUKernel::Init() {
if (!InferShapeDone()) {
return RET_OK;
}
return ReSize();
}
int CropCPUKernel::ReSize() { return CropBaseCPUKernel::ReSize(); }
int CropCPUKernel::CropParallelRun(int thread_id) {
auto input = in_tensors_[0];
auto output = out_tensors_[0];
float *input_data = reinterpret_cast<float *>(input->MutableData());
float *output_data = reinterpret_cast<float *>(output->MutableData());
auto param = reinterpret_cast<CropParameter *>(op_parameter_);
Crop4D(input_data, output_data, input->shape().data(), output->shape().data(), param, thread_id);
float *input_data = reinterpret_cast<float *>(input->data_c());
float *output_data = reinterpret_cast<float *>(output->data_c());
Crop4D(input_data, output_data, input->shape().data(), output->shape().data(), crop_para_, thread_id);
return RET_OK;
}
int CropCPUKernel::Run() {
auto input = in_tensors_[0];
auto output = out_tensors_[0];
auto param = reinterpret_cast<CropParameter *>(op_parameter_);
if (output->shape()[1] < param->op_parameter_.thread_num_) {
float *input_data = reinterpret_cast<float *>(input->MutableData());
float *output_data = reinterpret_cast<float *>(output->MutableData());
Crop4DNoParallel(input_data, output_data, input->shape().data(), output->shape().data(), param);
if (output->shape()[1] < crop_para_->thread_count_) {
float *input_data = reinterpret_cast<float *>(input->data_c());
float *output_data = reinterpret_cast<float *>(output->data_c());
Crop4DNoParallel(input_data, output_data, input->shape().data(), output->shape().data(), crop_para_);
return RET_OK;
}
auto ret = ParallelLaunch(this->context_->thread_pool_, CropLaunch, this, param->op_parameter_.thread_num_);
auto ret = ParallelLaunch(this->context_->thread_pool_, CropLaunch, this, crop_para_->thread_count_);
if (ret != RET_OK) {
MS_LOG(ERROR) << "Crop launch fail!ret: " << ret;
return RET_ERROR;
}
return RET_OK;
}
REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_Crop, CPUKernelCreator<CropCPUKernel>)
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Crop, CPUKernelCreator<CropCPUKernel>)
} // namespace mindspore::kernel
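The new Run above keeps two paths: when the output's second dimension is smaller than crop_para_->thread_count_, some tasks would receive an empty slice, so the kernel calls Crop4DNoParallel instead of ParallelLaunch. A hypothetical sketch of that per-task slicing (the real split lives inside nnacl's Crop4D; the names and chunking scheme here are assumptions):

#include <algorithm>
#include <cstdio>

struct Slice { int begin; int end; };

// Assumed scheme: divide dim-1 of the output into UP_DIV-sized chunks per task.
static Slice TaskSlice(int dim1, int thread_count, int task_id) {
  int per_task = (dim1 + thread_count - 1) / thread_count;
  int begin = task_id * per_task;
  int end = std::min(begin + per_task, dim1);
  return {begin, std::max(begin, end)};
}

int main() {
  // dim1 = 2 with 4 threads: tasks 2 and 3 get nothing, hence the serial fallback.
  for (int t = 0; t < 4; ++t) {
    Slice s = TaskSlice(2, 4, t);
    printf("task %d: [%d, %d)\n", t, s.begin, s.end);
  }
  return 0;
}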

View File

@ -15,7 +15,11 @@
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_CROP_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_CROP_H_
#include <vector>
#include "include/errorcode.h"
#include "nnacl/fp32/crop_fp32.h"
#include "nnacl/crop_parameter.h"
#include "src/lite_kernel.h"
#include "src/runtime/kernel/arm/base/layout_transform.h"
#include "src/runtime/kernel/arm/base/crop_base.h"
@ -29,7 +33,7 @@ class CropCPUKernel : public CropBaseCPUKernel {
: CropBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
~CropCPUKernel() = default;
int Init() override;
int ReSize() override { return 0; }
int ReSize() override;
int Run() override;
int CropParallelRun(int thread_id);
};

View File

@ -14,12 +14,8 @@
* limitations under the License.
*/
#include "src/runtime/kernel/arm/fp32/depth_to_space_fp32.h"
#include <vector>
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "nnacl/arithmetic_common.h"
#include "nnacl/depth_to_space.h"
#include "include/errorcode.h"
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
@ -29,18 +25,11 @@ using mindspore::lite::RET_PARAM_INVALID;
using mindspore::schema::PrimitiveType_DepthToSpace;
namespace mindspore::kernel {
int DepthToSpaceCPUKernel::Init() {
auto ret = DepthToSpaceBaseCPUKernel::Init();
if (ret != RET_OK) {
return ret;
}
DepthToSpaceParameter *param = reinterpret_cast<DepthToSpaceParameter *>(op_parameter_);
param->data_type_size_ = sizeof(float);
param_->data_type_size_ = sizeof(float);
if (!InferShapeDone()) {
return RET_OK;
}
return ReSize();
}
@ -49,16 +38,17 @@ int DepthToSpaceCPUKernel::ReSize() { return DepthToSpaceBaseCPUKernel::ReSize()
int DepthToSpaceCPUKernel::Run() {
auto input = in_tensors_[0];
auto output = out_tensors_[0];
const float *input_data = reinterpret_cast<const float *>(input->MutableData());
float *output_data = reinterpret_cast<float *>(output->MutableData());
const float *input_data = reinterpret_cast<const float *>(input->data_c());
float *output_data = reinterpret_cast<float *>(output->data_c());
auto in_shape = input->shape();
DepthToSpaceParameter *param = reinterpret_cast<DepthToSpaceParameter *>(op_parameter_);
if (input->format() == schema::Format::Format_NHWC) {
DepthToSpaceForNHWC(input_data, output_data, in_shape.data(), param);
DepthToSpaceForNHWC(input_data, output_data, in_shape.data(), param_);
return RET_OK;
} else {
MS_LOG(ERROR) << "Depth_to_space only support NHWC now!";
return RET_ERROR;
}
}
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_DepthToSpace, CPUKernelCreator<DepthToSpaceCPUKernel>)
} // namespace mindspore::kernel

View File

@ -17,6 +17,9 @@
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_DEPTH_TO_SPACE_H_
#include <vector>
#include "include/errorcode.h"
#include "nnacl/arithmetic_common.h"
#include "nnacl/depth_to_space.h"
#include "src/runtime/kernel/arm/base/depth_to_space_base.h"
namespace mindspore::kernel {

View File

@ -26,7 +26,6 @@ using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_DetectionPostProcess;
namespace mindspore::kernel {
int DetectionPostProcessCPUKernel::GetInputData() {
if ((in_tensors_.at(0)->data_type() != kNumberTypeFloat32 && in_tensors_.at(0)->data_type() != kNumberTypeFloat) ||
(in_tensors_.at(1)->data_type() != kNumberTypeFloat32 && in_tensors_.at(1)->data_type() != kNumberTypeFloat)) {
@ -37,32 +36,6 @@ int DetectionPostProcessCPUKernel::GetInputData() {
input_scores_ = reinterpret_cast<float *>(in_tensors_.at(1)->data_c());
return RET_OK;
}
kernel::LiteKernel *CpuDetectionPostProcessFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs,
OpParameter *opParameter, const lite::InnerContext *ctx,
const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
if (opParameter == nullptr) {
MS_LOG(ERROR) << "Create kernel failed, opParameter is nullptr, type: PrimitiveType_DetectionPostProcess. ";
return nullptr;
}
MS_ASSERT(desc.type == schema::PrimitiveType_DetectionPostProcess);
auto *kernel = new (std::nothrow) DetectionPostProcessCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new DetectionPostProcessCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_DetectionPostProcess, CpuDetectionPostProcessFp32KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_DetectionPostProcess,
CPUKernelCreator<DetectionPostProcessCPUKernel>)
} // namespace mindspore::kernel

View File

@ -15,10 +15,17 @@
*/
#include "src/runtime/kernel/arm/fp32/fullconnection_fp32.h"
#include "src/kernel_registry.h"
#include "src/runtime/runtime_api.h"
using mindspore::kernel::KERNEL_ARCH::kCPU;
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_INVALID_OP_ATTR;
using mindspore::lite::RET_MEMORY_FAILED;
using mindspore::lite::RET_NULL_PTR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_FullConnection;
namespace mindspore::kernel {
FullconnectionCPUKernel::~FullconnectionCPUKernel() {
@ -63,7 +70,7 @@ int FullconnectionCPUKernel::ReSize() {
fc_param_->row_6_ = UP_ROUND(fc_param_->row_, C6NUM);
fc_param_->row_4_ = UP_ROUND(fc_param_->row_, C4NUM);
thread_count_ = MSMIN(thread_count_, UP_DIV(fc_param_->col_align_, col_tile));
thread_count_ = MSMIN(op_parameter_->thread_num_, UP_DIV(fc_param_->col_align_, col_tile));
thread_stride_ = UP_DIV(UP_DIV(fc_param_->col_align_, col_tile), thread_count_);
#ifdef ENABLE_ARM
@ -214,4 +221,57 @@ int FullconnectionCPUKernel::Run() {
return RET_OK;
}
kernel::LiteKernel *CpuFullConnectionFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs,
OpParameter *opParameter, const lite::InnerContext *ctx,
const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
MS_ASSERT(opParameter != nullptr);
MS_ASSERT(desc.type == schema::PrimitiveType_FullConnection);
auto *weight_tensor = inputs.at(kWeightIndex);
// data of second tensor of fc may be nullptr
auto *restore_data = weight_tensor->data_c();
auto restore_type = weight_tensor->data_type();
bool dequant_flag =
!weight_tensor->quant_params().empty() && weight_tensor->quant_params().front().inited && restore_data != nullptr;
if (dequant_flag) {
auto *dequant_weight = kernel::DequantUtil::DequantWeight(weight_tensor);
if (dequant_weight == nullptr) {
MS_LOG(ERROR) << "dequant data is nullptr.";
return nullptr;
}
weight_tensor->set_data(dequant_weight);
}
auto kernel = new (std::nothrow) FullconnectionCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (!kernel) {
MS_LOG(ERROR) << "kernel is nullptr.";
if (dequant_flag) {
weight_tensor->FreeData();
weight_tensor->set_data(restore_data);
weight_tensor->set_data_type(restore_type);
}
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
if (dequant_flag) {
weight_tensor->FreeData();
weight_tensor->set_data(restore_data);
weight_tensor->set_data_type(restore_type);
}
delete kernel;
return nullptr;
}
if (dequant_flag) {
weight_tensor->FreeData();
weight_tensor->set_data(restore_data);
weight_tensor->set_data_type(restore_type);
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_FullConnection, CpuFullConnectionFp32KernelCreator)
} // namespace mindspore::kernel
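The ReSize hunk above now derives the thread split directly from op_parameter_->thread_num_: output columns are grouped into col_tile-wide blocks, the thread count is clamped to the number of blocks, and thread_stride_ is how many blocks each thread covers. A standalone arithmetic sketch with made-up sizes (not values from the commit):

#include <algorithm>
#include <cstdio>

static int UpDiv(int a, int b) { return (a + b - 1) / b; }

int main() {
  int col_align = 96;   // assumed padded output column count
  int col_tile = 8;     // assumed columns per micro-kernel call
  int thread_num = 16;  // assumed threads offered by the context

  int blocks = UpDiv(col_align, col_tile);          // 12 column blocks
  int thread_count = std::min(thread_num, blocks);  // MSMIN clamp -> 12
  int thread_stride = UpDiv(blocks, thread_count);  // 1 block per thread
  printf("blocks=%d thread_count=%d thread_stride=%d\n", blocks, thread_count, thread_stride);
  return 0;
}

The same MSMIN/UP_DIV clamp shows up again in MatmulCPUKernel::MallocMatrixBBuffer further down.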

View File

@ -18,20 +18,22 @@
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_FULLCONNECTION_H_
#include <vector>
#include "src/runtime/kernel/arm/base/fullconnection_base.h"
#include "include/context.h"
#include "include/errorcode.h"
#include "nnacl/fp32/matmul_fp32.h"
#include "src/lite_kernel.h"
#include "src/runtime/kernel/arm/base/dequant.h"
using mindspore::lite::InnerContext;
namespace mindspore::kernel {
class FullconnectionCPUKernel : public FullconnectionBaseCPUKernel {
class FullconnectionCPUKernel : public LiteKernel {
public:
FullconnectionCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive)
: FullconnectionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
: LiteKernel(parameter, inputs, outputs, ctx, primitive) {
fc_param_ = reinterpret_cast<MatMulParameter *>(op_parameter_);
}
~FullconnectionCPUKernel() override;
int Init() override;
@ -47,6 +49,7 @@ class FullconnectionCPUKernel : public FullconnectionBaseCPUKernel {
void InitMatrixB(const float *src_ptr, float *dst_ptr);
private:
MatMulParameter *fc_param_ = nullptr;
float *a_pack_ptr_ = nullptr;
float *b_pack_ptr_ = nullptr;
float *c_ptr_ = nullptr;
@ -54,6 +57,8 @@ class FullconnectionCPUKernel : public FullconnectionBaseCPUKernel {
float *a_ptr_ = nullptr;
float *b_ptr_ = nullptr;
bool is_vector_input_ = false;
int thread_count_ = 1;
int thread_stride_ = 0;
};
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_FULLCONNECTION_H_

View File

@ -129,27 +129,5 @@ int FusedBatchnormCPUKernel::DoExecute(int task_id) {
return RET_OK;
}
kernel::LiteKernel *CpuFusedBatchnormKernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs,
OpParameter *op_parameter, const lite::InnerContext *ctx,
const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
FusedBatchnormCPUKernel *kernel =
new (std::nothrow) FusedBatchnormCPUKernel(op_parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new FusedBatchnormCPUKernel fail!";
free(op_parameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << op_parameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(op_parameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_FusedBatchNorm, CpuFusedBatchnormKernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_FusedBatchNorm, CPUKernelCreator<FusedBatchnormCPUKernel>)
} // namespace mindspore::kernel

View File

@ -1,88 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/runtime/kernel/arm/fp32/gelu_fp32.h"
#include "src/runtime/kernel/arm/base/gelu_base.h"
#include "nnacl/fp32/gelu_fp32.h"
#include "nnacl/gelu_parameter.h"
#include "src/kernel_registry.h"
#include "include/errorcode.h"
#include "src/runtime/runtime_api.h"
using mindspore::kernel::KERNEL_ARCH::kCPU;
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
// using mindspore::schema::PrimitiveType_GeLU;
namespace mindspore::kernel {
int GeLUCPUKernel::Init() {
auto ret = GeLUBaseCPUKernel::Init();
if (ret != RET_OK) {
return ret;
}
if (!InferShapeDone()) {
return RET_OK;
}
return ReSize();
}
int GeLUCPUKernel::ReSize() { return RET_OK; }
int GeLUCPUKernel::GeLU(int task_id) {
int64_t real_dst_count = MSMIN(elements_num_ - task_id * count_unit_, count_unit_);
if (real_dst_count <= 0) {
return lite::RET_OK;
}
float *cur_input_data = input_ptr_ + task_id * count_unit_;
float *cur_output_data = output_ptr_ + task_id * count_unit_;
auto ret = DoGeLU(cur_input_data, cur_output_data, real_dst_count, gelu_param_);
if (ret != RET_OK) {
MS_LOG(ERROR) << "GeLU error task_id[" << task_id << "] error_code[" << ret << "]";
return RET_ERROR;
}
return RET_OK;
}
int GeLURun(void *cdata, int task_id) {
auto g_kernel = reinterpret_cast<GeLUCPUKernel *>(cdata);
auto ret = g_kernel->GeLU(task_id);
if (ret != RET_OK) {
MS_LOG(ERROR) << "GeLURun error task_id[" << task_id << "] error_code[" << ret << "]";
return RET_ERROR;
}
return RET_OK;
}
int GeLUCPUKernel::Run() {
auto in_tensor = in_tensors_.front();
auto out_tensor = out_tensors_.front();
input_ptr_ = reinterpret_cast<float *>(in_tensor->MutableData());
output_ptr_ = reinterpret_cast<float *>(out_tensor->MutableData());
elements_num_ = out_tensor->ElementsNum();
count_unit_ = thread_count_ > 1 ? UP_DIV(elements_num_, thread_count_) : elements_num_;
auto ret = ParallelLaunch(this->context_->thread_pool_, GeLURun, this, thread_count_);
if (ret != RET_OK) {
MS_LOG(ERROR) << "Scale error error_code[" << ret << "]";
return RET_ERROR;
}
return RET_OK;
}
} // namespace mindspore::kernel

View File

@ -1,46 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_GELU_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_GELU_H_
#include <vector>
#include "src/runtime/kernel/arm/base/gelu_base.h"
#include "src/lite_kernel.h"
namespace mindspore::kernel {
class GeLUCPUKernel : public GeLUBaseCPUKernel {
public:
GeLUCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive)
: GeLUBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
~GeLUCPUKernel() override = default;
int Init() override;
int ReSize() override;
int Run() override;
int GeLU(int task_id);
private:
float *input_ptr_ = nullptr;
float *output_ptr_ = nullptr;
int64_t elements_num_ = 0;
int64_t count_unit_ = 0;
};
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_GELU_H_

View File

@ -124,7 +124,7 @@ int MatmulCPUKernel::MallocMatrixBBuffer() {
return RET_MEMORY_FAILED;
}
thread_count_ = MSMIN(thread_count_, UP_DIV(params_->col_align_, col_tile_));
thread_count_ = MSMIN(op_parameter_->thread_num_, UP_DIV(params_->col_align_, col_tile_));
thread_stride_ = UP_DIV(UP_DIV(params_->col_align_, col_tile_), thread_count_);
return RET_OK;
}

View File

@ -20,15 +20,16 @@
#include <vector>
#include "nnacl/matmul_parameter.h"
#include "src/lite_kernel.h"
#include "src/runtime/kernel/arm/base/matmul_base.h"
namespace mindspore::kernel {
class MatmulCPUKernel : public MatmulBaseCPUKernel {
class MatmulCPUKernel : public LiteKernel {
public:
explicit MatmulCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive)
: MatmulBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
: LiteKernel(parameter, inputs, outputs, ctx, primitive) {
params_ = reinterpret_cast<MatMulParameter *>(op_parameter_);
}
~MatmulCPUKernel() override;
int Init() override;
int ReSize() override;
@ -45,6 +46,7 @@ class MatmulCPUKernel : public MatmulBaseCPUKernel {
void FreeTmpBuffer();
private:
MatMulParameter *params_ = nullptr;
float *a_pack_ptr_ = nullptr;
float *b_pack_ptr_ = nullptr;
float *bias_ptr_ = nullptr;
@ -55,6 +57,8 @@ class MatmulCPUKernel : public MatmulBaseCPUKernel {
float *cur_c_ptr_ = nullptr;
bool is_vector_a_ = false;
int col_tile_ = 0;
int thread_stride_ = 0;
int thread_count_ = 0;
};
} // namespace mindspore::kernel

View File

@ -413,4 +413,6 @@ int PadCPUKernel::Run() {
return RET_OK;
}
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Pad, CPUKernelCreator<PadCPUKernel>)
} // namespace mindspore::kernel

View File

@ -91,4 +91,6 @@ int PoolingCPUKernel::Run() {
}
return RET_OK;
}
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Pooling, CPUKernelCreator<PoolingCPUKernel>)
} // namespace mindspore::kernel

View File

@ -74,4 +74,5 @@ int PowerCPUKernel::RunImpl(int task_id) {
return RET_OK;
}
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Power, CPUKernelCreator<PowerCPUKernel>)
} // namespace mindspore::kernel

View File

@ -21,15 +21,14 @@
#include "src/lite_kernel.h"
#include "include/context.h"
#include "nnacl/power.h"
#include "src/runtime/kernel/arm/base/power_base.h"
namespace mindspore::kernel {
class PowerCPUKernel : public PowerBaseCPUKernel {
class PowerCPUKernel : public LiteKernel {
public:
PowerCPUKernel(OpParameter *param, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive)
: PowerBaseCPUKernel(param, inputs, outputs, ctx, primitive),
: LiteKernel(param, inputs, outputs, ctx, primitive),
thread_count_(ctx->thread_num_),
power_(reinterpret_cast<PowerParameter *>(op_parameter_)->power_),
scale_(reinterpret_cast<PowerParameter *>(op_parameter_)->scale_),

View File

@ -234,4 +234,8 @@ void ReduceCPUKernel::FreeTmpBuffer() {
}
data_buffers_.clear();
}
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Reduce, CPUKernelCreator<ReduceCPUKernel>)
REG_KERNEL(kCPU, kNumberTypeInt, PrimitiveType_Reduce, CPUKernelCreator<ReduceCPUKernel>)
REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_Reduce, CPUKernelCreator<ReduceCPUKernel>)
} // namespace mindspore::kernel

View File

@ -28,20 +28,20 @@ using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_Reshape;
namespace mindspore::kernel {
int ReshapeCPUKernel::Init() {
ReshapeBaseCPUKernel::Init();
return RET_OK;
}
int ReshapeCPUKernel::Init() { return RET_OK; }
int ReshapeCPUKernel::ReSize() { return RET_OK; }
int ReshapeCPUKernel::Run() {
auto input_ptr = in_tensors_.at(kInputIndex)->MutableData();
auto output_ptr = out_tensors_.at(kOutputIndex)->MutableData();
auto input_ptr = in_tensors_.at(kInputIndex)->data_c();
auto output_ptr = out_tensors_.at(kOutputIndex)->data_c();
size_t data_size = in_tensors_.at(kInputIndex)->Size();
MS_ASSERT(input_ptr);
MS_ASSERT(output_ptr);
Reshape(input_ptr, output_ptr, data_size);
return RET_OK;
}
REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_Reshape, CPUKernelCreator<ReshapeCPUKernel>)
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Reshape, CPUKernelCreator<ReshapeCPUKernel>)
} // namespace mindspore::kernel

View File

@ -19,19 +19,17 @@
#include <vector>
#include "src/lite_kernel.h"
#include "include/context.h"
#include "src/runtime/kernel/arm/base/reshape_base.h"
using mindspore::lite::InnerContext;
namespace mindspore::kernel {
class ReshapeCPUKernel : public ReshapeBaseCPUKernel {
class ReshapeCPUKernel : public LiteKernel {
public:
ReshapeCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive)
: ReshapeBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
: LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
~ReshapeCPUKernel() = default;
int Init() override;

View File

@ -15,17 +15,17 @@
*/
#include "src/runtime/kernel/arm/fp32/resize_fp32.h"
#include <algorithm>
#include "include/errorcode.h"
#include "nnacl/fp32/resize_fp32.h"
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "src/runtime/runtime_api.h"
using mindspore::kernel::KERNEL_ARCH::kCPU;
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_INVALID_OP_ATTR;
using mindspore::lite::RET_NULL_PTR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_Resize;
namespace mindspore::kernel {
int ResizeCPUKernel::Init() {
@ -217,4 +217,6 @@ int ResizeCPUKernel::Run() {
return RET_OK;
}
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Resize, CPUKernelCreator<ResizeCPUKernel>)
} // namespace mindspore::kernel

View File

@ -17,6 +17,9 @@
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_RESIZE_H_
#include <vector>
#include <algorithm>
#include "include/errorcode.h"
#include "nnacl/fp32/resize_fp32.h"
#include "src/lite_kernel.h"
#include "src/runtime/kernel/arm/base/resize_base.h"

View File

@ -111,4 +111,5 @@ int SoftmaxCPUKernel::Run() {
return ret;
}
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_SoftMax, CPUKernelCreator<SoftmaxCPUKernel>)
} // namespace mindspore::kernel

View File

@ -87,4 +87,32 @@ int SplitCPUKernel::Run() {
return RET_OK;
}
kernel::LiteKernel *CpuSplitInt32KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
const InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
if (opParameter == nullptr) {
MS_LOG(ERROR) << "Input opParameter is nullptr!";
return nullptr;
}
MS_ASSERT(desc.type == schema::PrimitiveType_Split);
auto *kernel = new (std::nothrow) SplitCPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new SplitCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
delete kernel;
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_Split, CPUKernelCreator<SplitCPUKernel>)
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Split, CPUKernelCreator<SplitCPUKernel>)
} // namespace mindspore::kernel

View File

@ -14,27 +14,19 @@
* limitations under the License.
*/
#include "src/runtime/kernel/arm/int8/batch_to_space_int8.h"
#include <vector>
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "nnacl/batch_to_space.h"
#include "nnacl/int8/batch_to_space_int8.h"
#include "include/errorcode.h"
using mindspore::lite::RET_OK;
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_FORMAT_ERR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_BatchToSpace;
using mindspore::schema::PrimitiveType_BatchToSpaceND;
namespace mindspore::kernel {
int BatchToSpaceInt8CPUKernel::Init() {
auto ret = BatchToSpaceBaseCPUKernel::Init();
if (ret != RET_OK) {
return ret;
}
MS_ASSERT(in_tensors_.at(0)->format() == schema::Format::Format_NHWC);
auto *input_tensor = in_tensors_.at(kInputIndex);
auto in_quant_args = input_tensor->quant_params();
in_quant_arg_.scale_ = in_quant_args.front().scale;
@ -50,7 +42,10 @@ int BatchToSpaceInt8CPUKernel::Init() {
return ReSize();
}
int BatchToSpaceInt8CPUKernel::ReSize() { return BatchToSpaceBaseCPUKernel::ReSize(); }
int BatchToSpaceInt8CPUKernel::ReSize() {
MS_ASSERT(in_tensors_.at(0)->shape().size() == 4);
return RET_OK;
}
int BatchToSpaceInt8CPUKernel::Run() {
auto input = in_tensors_[0];
@ -62,7 +57,7 @@ int BatchToSpaceInt8CPUKernel::Run() {
BatchToSpaceParameter *param = reinterpret_cast<BatchToSpaceParameter *>(this->op_parameter_);
if (in_quant_arg_.scale_ == out_quant_arg_.scale_ && in_quant_arg_.zp_ == out_quant_arg_.zp_) {
if (IsNoCrop()) {
if (param->no_crop_) {
BatchToSpaceNoCropForNHWC(input_data, output_data, in_shape.data(), out_shape[0], param->block_shape_,
sizeof(int8_t));
} else {
@ -70,7 +65,7 @@ int BatchToSpaceInt8CPUKernel::Run() {
sizeof(int8_t));
}
} else {
if (IsNoCrop()) {
if (param->no_crop_) {
BatchToSpaceNoCropForNHWCInt8(input_data, output_data, in_shape.data(), out_shape[0], param->block_shape_,
&in_quant_arg_, &out_quant_arg_);
} else {
@ -82,27 +77,6 @@ int BatchToSpaceInt8CPUKernel::Run() {
return RET_OK;
}
kernel::LiteKernel *CpuBatchToSpaceInt8KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs,
OpParameter *op_parameter, const lite::InnerContext *ctx,
const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
auto *kernel = new (std::nothrow) BatchToSpaceInt8CPUKernel(op_parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new BatchToSpaceInt8CPUKernel fail!";
free(op_parameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << op_parameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(op_parameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_BatchToSpace, CpuBatchToSpaceInt8KernelCreator)
REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_BatchToSpaceND, CpuBatchToSpaceInt8KernelCreator)
REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_BatchToSpace, CPUKernelCreator<BatchToSpaceInt8CPUKernel>)
REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_BatchToSpaceND, CPUKernelCreator<BatchToSpaceInt8CPUKernel>)
} // namespace mindspore::kernel

View File

@ -17,15 +17,18 @@
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_INT8_BATCH_TO_SPACE_INT8_H_
#include <vector>
#include "src/runtime/kernel/arm/base/batch_to_space_base.h"
#include "include/errorcode.h"
#include "nnacl/batch_to_space.h"
#include "nnacl/int8/batch_to_space_int8.h"
#include "src/lite_kernel.h"
namespace mindspore::kernel {
class BatchToSpaceInt8CPUKernel : public BatchToSpaceBaseCPUKernel {
class BatchToSpaceInt8CPUKernel : public LiteKernel {
public:
BatchToSpaceInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive)
: BatchToSpaceBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
: LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
~BatchToSpaceInt8CPUKernel() = default;

View File

@ -15,23 +15,17 @@
*/
#include "src/runtime/kernel/arm/int8/concat_int8.h"
#include <limits>
#include "nnacl/int8/concat_int8.h"
#include "schema/model_generated.h"
#include "include/errorcode.h"
#include "src/kernel_registry.h"
using mindspore::kernel::KERNEL_ARCH::kCPU;
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::lite::KernelRegistrar;
using mindspore::schema::PrimitiveType_Concat;
namespace mindspore::kernel {
int ConcatInt8CPUKernel::Init() {
ConcatBaseCPUKernel::Init();
concat_param_->input_shapes_ = nullptr;
auto input_num = in_tensors_.size();
input_data_ = reinterpret_cast<int8_t **>(malloc(sizeof(int8_t *) * input_num));
@ -65,10 +59,9 @@ int ConcatInt8CPUKernel::Init() {
}
int ConcatInt8CPUKernel::ReSize() {
auto ret = ConcatBaseCPUKernel::ReSize();
if (ret != RET_OK) {
return ret;
}
concat_param_->axis_ =
concat_param_->axis_ >= 0 ? concat_param_->axis_ : in_tensors_.front()->shape().size() + concat_param_->axis_;
auto input_num = in_tensors_.size();
concat_param_->input_num_ = input_num;
concat_param_->input_shapes_ = reinterpret_cast<int **>(malloc(sizeof(int *) * input_num));
@ -113,7 +106,8 @@ int ConcatInt8CPUKernel::ReSize() {
int ConcatInt8CPUKernel::Run() {
auto input_num = concat_param_->input_num_;
count_unit_ = thread_count_ > 1 ? UP_DIV(before_axis_size, thread_count_) : before_axis_size;
count_unit_ =
op_parameter_->thread_num_ > 1 ? UP_DIV(before_axis_size, op_parameter_->thread_num_) : before_axis_size;
concat_param_->count_unit_ = count_unit_;
for (int i = 0; i < input_num; i++) {
@ -121,7 +115,7 @@ int ConcatInt8CPUKernel::Run() {
}
output_data_ = reinterpret_cast<int8_t *>(out_tensors_.at(0)->MutableData());
auto ret = ParallelLaunch(this->context_->thread_pool_, ConcatInt8Run, this, thread_count_);
auto ret = ParallelLaunch(this->context_->thread_pool_, ConcatInt8Run, this, op_parameter_->thread_num_);
return ret;
}
@ -141,26 +135,5 @@ int ConcatInt8CPUKernel::DoExecute(int task_id) {
return lite::RET_OK;
}
kernel::LiteKernel *CpuConcatInt8KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
const InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
auto *kernel = new (std::nothrow) ConcatInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new ConcatCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Concat, CpuConcatInt8KernelCreator)
REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Concat, CPUKernelCreator<ConcatInt8CPUKernel>)
} // namespace mindspore::kernel

View File

@ -18,20 +18,22 @@
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_INT8_CONCAT_INT8_H_
#include <vector>
#include <limits>
#include "nnacl/int8/concat_int8.h"
#include "include/errorcode.h"
#include "src/lite_kernel.h"
#include "include/context.h"
#include "src/runtime/kernel/arm/base/concat_base.h"
#include "src/runtime/runtime_api.h"
using mindspore::lite::InnerContext;
namespace mindspore::kernel {
class ConcatInt8CPUKernel : public ConcatBaseCPUKernel {
class ConcatInt8CPUKernel : public LiteKernel {
public:
ConcatInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx,
const std::vector<lite::Tensor *> &outputs, const mindspore::lite::InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive)
: ConcatBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
: LiteKernel(parameter, inputs, outputs, ctx, primitive) {
concat_param_ = reinterpret_cast<ConcatParameter *>(op_parameter_);
}
~ConcatInt8CPUKernel() override {
if (input_data_ != nullptr) {
free(input_data_);
@ -64,6 +66,7 @@ class ConcatInt8CPUKernel : public ConcatBaseCPUKernel {
int64_t count_unit_;
int8_t **input_data_ = nullptr;
int8_t *output_data_ = nullptr;
ConcatParameter *concat_param_ = nullptr;
};
int ConcatInt8Run(void *cdata, int task_id);

View File

@ -15,22 +15,17 @@
*/
#include "src/runtime/kernel/arm/int8/crop_int8.h"
#include <limits>
#include "nnacl/int8/crop_int8.h"
#include "include/errorcode.h"
#include "src/runtime/runtime_api.h"
#include "src/kernel_registry.h"
using mindspore::kernel::KERNEL_ARCH::kCPU;
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_MEMORY_FAILED;
using mindspore::lite::RET_OK;
using mindspore::lite::KernelRegistrar;
using mindspore::schema::PrimitiveType_Crop;
namespace mindspore::kernel {
int CropInt8CPUKernel::Init() {
auto ret = CropBaseCPUKernel::Init();
if (ret != RET_OK) {
@ -69,7 +64,7 @@ CropInt8CPUKernel::~CropInt8CPUKernel() {
int CropInt8CPUKernel::ReSize() { return CropBaseCPUKernel::ReSize(); }
int CropInt8CPUKernel::Run() {
auto ret = ParallelLaunch(this->context_->thread_pool_, CropInt8Run, this, thread_count_);
auto ret = ParallelLaunch(this->context_->thread_pool_, CropInt8Run, this, crop_para_->thread_count_);
return ret;
}
@ -82,31 +77,11 @@ int CropInt8Run(void *cdata, int task_id) {
int CropInt8CPUKernel::DoExecute(int task_id) {
auto input_tensor = in_tensors_.at(kInputIndex);
auto out_tensor = out_tensors_.at(kOutputIndex);
int8_t *input_data = reinterpret_cast<int8_t *>(input_tensor->MutableData());
int8_t *output_data = reinterpret_cast<int8_t *>(out_tensor->MutableData());
int8_t *input_data = reinterpret_cast<int8_t *>(input_tensor->data_c());
int8_t *output_data = reinterpret_cast<int8_t *>(out_tensor->data_c());
Int8Crop(input_data, output_data, task_id, crop_para_);
return RET_OK;
}
kernel::LiteKernel *CpuCropInt8KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
const InnerContext *ctx, const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
auto *kernel = new (std::nothrow) CropInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new CropCPUKernel fail!";
free(opParameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Crop, CpuCropInt8KernelCreator)
REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Crop, CPUKernelCreator<CropInt8CPUKernel>)
} // namespace mindspore::kernel

View File

@ -18,32 +18,27 @@
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_INT8_CROP_INT8_H_
#include <vector>
#include "src/lite_kernel.h"
#include <limits>
#include "include/errorcode.h"
#include "include/context.h"
#include "src/runtime/kernel/arm/base/crop_base.h"
#include "nnacl/int8/crop_int8.h"
#include "src/lite_kernel.h"
#include "src/runtime/runtime_api.h"
using mindspore::lite::InnerContext;
#include "src/runtime/kernel/arm/base/crop_base.h"
namespace mindspore::kernel {
class CropInt8CPUKernel : public CropBaseCPUKernel {
public:
CropInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx,
const std::vector<lite::Tensor *> &outputs, const mindspore::lite::InnerContext *ctx,
const mindspore::lite::PrimitiveC *primitive)
: CropBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {
crop_para_ = reinterpret_cast<CropParameter *>(op_parameter_);
crop_para_->thread_count_ = op_parameter_->thread_num_;
}
: CropBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
~CropInt8CPUKernel();
int Init() override;
int ReSize() override;
int Run() override;
int DoExecute(int task_id);
private:
CropParameter *crop_para_;
};
int CropInt8Run(void *cdata, int task_id);

View File

@ -14,29 +14,19 @@
* limitations under the License.
*/
#include "src/runtime/kernel/arm/int8/depth_to_space_int8.h"
#include <vector>
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "nnacl/depth_to_space.h"
#include "nnacl/int8/depth_to_space_int8.h"
#include "include/errorcode.h"
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_FORMAT_ERR;
using mindspore::lite::RET_OK;
using mindspore::lite::RET_PARAM_INVALID;
using mindspore::schema::PrimitiveType_DepthToSpace;
namespace mindspore::kernel {
int DepthToSpaceInt8CPUKernel::Init() {
auto ret = DepthToSpaceBaseCPUKernel::Init();
if (ret != RET_OK) {
return ret;
}
DepthToSpaceParameter *param = reinterpret_cast<DepthToSpaceParameter *>(op_parameter_);
param->data_type_size_ = sizeof(int8_t);
param_->data_type_size_ = sizeof(int8_t);
auto *input_tensor = in_tensors_.at(kInputIndex);
auto in_quant_args = input_tensor->quant_params();
@ -58,44 +48,17 @@ int DepthToSpaceInt8CPUKernel::ReSize() { return DepthToSpaceBaseCPUKernel::ReSi
int DepthToSpaceInt8CPUKernel::Run() {
auto input = in_tensors_[0];
auto output = out_tensors_[0];
const int8_t *input_data = reinterpret_cast<const int8_t *>(input->MutableData());
int8_t *output_data = reinterpret_cast<int8_t *>(output->MutableData());
const int8_t *input_data = reinterpret_cast<const int8_t *>(input->data_c());
int8_t *output_data = reinterpret_cast<int8_t *>(output->data_c());
auto in_shape = input->shape();
DepthToSpaceParameter *param = reinterpret_cast<DepthToSpaceParameter *>(op_parameter_);
if (in_quant_arg_.scale_ == out_quant_arg_.scale_ && in_quant_arg_.zp_ == out_quant_arg_.zp_) {
DepthToSpaceForNHWC(input_data, output_data, in_shape.data(), param);
DepthToSpaceForNHWC(input_data, output_data, in_shape.data(), param_);
} else {
DepthToSpaceForNHWCInt8(input_data, output_data, in_shape.data(), param, &in_quant_arg_, &out_quant_arg_);
DepthToSpaceForNHWCInt8(input_data, output_data, in_shape.data(), param_, &in_quant_arg_, &out_quant_arg_);
}
return RET_OK;
}
kernel::LiteKernel *CpuDepthToSpaceInt8KernelCreator(const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs,
OpParameter *op_parameter, const lite::InnerContext *ctx,
const kernel::KernelKey &desc,
const mindspore::lite::PrimitiveC *primitive) {
MS_ASSERT(desc.type == schema::PrimitiveType_DepthToSpace);
if (op_parameter == nullptr) {
MS_LOG(ERROR) << "Input op_parameter is nullptr!";
return nullptr;
}
auto *kernel = new (std::nothrow) DepthToSpaceInt8CPUKernel(op_parameter, inputs, outputs, ctx, primitive);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new BatchToSpaceInt8CPUKernel fail!";
free(op_parameter);
return nullptr;
}
auto ret = kernel->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init kernel failed, name: " << op_parameter->name_ << ", type: "
<< schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(op_parameter->type_));
delete kernel;
return nullptr;
}
return kernel;
}
REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_DepthToSpace, CpuDepthToSpaceInt8KernelCreator)
REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_DepthToSpace, CPUKernelCreator<DepthToSpaceInt8CPUKernel>)
} // namespace mindspore::kernel

Some files were not shown because too many files have changed in this diff.