codecheck

lzk 2021-06-03 19:46:56 -07:00
parent ab599aa23b
commit 11bd7374b4
82 changed files with 56 additions and 149 deletions

View File

@ -33,7 +33,6 @@ OpParameter *PopulateBiasAddParameter(const void *prim) {
param->op_parameter_.type_ = primitive->value_type();
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_BiasAdd, PopulateBiasAddParameter, SCHEMA_CUR)
} // namespace lite
} // namespace mindspore
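
Note: the REG_POPULATE lines kept in this and the following hunks rely on static self-registration, where each populate .cc file registers its parse function for a primitive type at program start. A minimal sketch of that idea, using stand-in names (ParamRegistry, ParseFunc, REG_POPULATE_SKETCH) rather than the actual MindSpore Lite Registry implementation:

#include <map>

// Stand-in for the real OpParameter; illustration only.
struct OpParameter { int type_ = 0; };
using ParseFunc = OpParameter *(*)(const void *prim);

// Each translation unit defines one static ParamRegistry object, so simply
// linking the file makes the op's populate function discoverable at runtime.
class ParamRegistry {
 public:
  static std::map<int, ParseFunc> &Table() {
    static std::map<int, ParseFunc> table;
    return table;
  }
  ParamRegistry(int primitive_type, ParseFunc func) { Table()[primitive_type] = func; }
};

#define REG_POPULATE_SKETCH(type, func) static ParamRegistry g_registry_##type(type, func);

// Usage sketch (hypothetical primitive type and populate function):
OpParameter *PopulateDemoParameter(const void *) {
  static OpParameter param;  // parsing of the flatbuffer primitive elided
  return &param;
}
constexpr int kPrimitiveTypeDemo = 1;
REG_POPULATE_SKETCH(kPrimitiveTypeDemo, PopulateDemoParameter)

int main() {
  // The table now maps kPrimitiveTypeDemo to PopulateDemoParameter.
  return ParamRegistry::Table().count(kPrimitiveTypeDemo) == 1 ? 0 : 1;
}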

View File

@ -32,7 +32,6 @@ OpParameter *PopulateClipParameter(const void *prim) {
param->type_ = primitive->value_type();
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_Clip, PopulateClipParameter, SCHEMA_CUR)
} // namespace lite
} // namespace mindspore

View File

@ -33,7 +33,6 @@ OpParameter *PopulateCommonParameter(const void *prim) {
param->type_ = primitive->value_type();
return param;
}
REG_POPULATE(PrimitiveType_ZerosLike, PopulateCommonParameter, SCHEMA_CUR)
REG_POPULATE(PrimitiveType_Depend, PopulateCommonParameter, SCHEMA_CUR)
} // namespace lite

View File

@ -32,8 +32,6 @@ OpParameter *PopulateCustomNormalizeParameter(const void *prim) {
param->type_ = primitive->value_type();
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_CustomNormalize, PopulateCustomNormalizeParameter, SCHEMA_CUR);
} // namespace lite
} // namespace mindspore

View File

@ -40,7 +40,6 @@ OpParameter *PopulateCustomPredictParameter(const void *prim) {
param->weight_threshold = value->weight_threshold();
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_CustomPredict, PopulateCustomPredictParameter, SCHEMA_CUR);
} // namespace lite
} // namespace mindspore

View File

@ -93,7 +93,6 @@ OpParameter *PopulateDeconvParameter(const void *prim) {
}
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_Conv2dTransposeFusion, PopulateDeconvParameter, SCHEMA_CUR)
} // namespace lite
} // namespace mindspore

View File

@ -55,7 +55,6 @@ OpParameter *PopulateDetectionPostProcessParameter(const void *prim) {
param->use_regular_nms_ = value->use_regular_nms();
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_DetectionPostProcess, PopulateDetectionPostProcessParameter, SCHEMA_CUR);
} // namespace lite
} // namespace mindspore

View File

@ -27,7 +27,6 @@ OpParameter *PopulateDivParameter(const void *prim) {
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_DivFusion, PopulateDivParameter, SCHEMA_CUR)
} // namespace lite
} // namespace mindspore

View File

@ -44,8 +44,6 @@ OpParameter *PopulateEmbeddingLookupParameter(const void *prim) {
}
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_EmbeddingLookupFusion, PopulateEmbeddingLookupParameter, SCHEMA_CUR);
} // namespace lite
} // namespace mindspore

View File

@ -32,7 +32,6 @@ OpParameter *PopulateFillParameter(const void *prim) {
param->type_ = primitive->value_type();
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_Fill, PopulateFillParameter, SCHEMA_CUR)
} // namespace lite
} // namespace mindspore

View File

@ -32,7 +32,6 @@ OpParameter *PopulateFlattenParameter(const void *prim) {
param->type_ = primitive->value_type();
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_Flatten, PopulateFlattenParameter, SCHEMA_CUR)
} // namespace lite
} // namespace mindspore

View File

@ -50,7 +50,6 @@ OpParameter *PopulateFullconnectionParameter(const void *prim) {
param->use_axis_ = value->use_axis();
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_FullConnection, PopulateFullconnectionParameter, SCHEMA_CUR)
} // namespace lite
} // namespace mindspore

View File

@ -41,7 +41,6 @@ OpParameter *PopulateFusedBatchNorm(const void *prim) {
param->fused_ = true;
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_FusedBatchNorm, PopulateFusedBatchNorm, SCHEMA_CUR)
} // namespace lite
} // namespace mindspore

View File

@ -32,8 +32,6 @@ OpParameter *PopulateHashtableLookupParameter(const void *prim) {
param->type_ = primitive->value_type();
return param;
}
REG_POPULATE(PrimitiveType_HashtableLookup, PopulateHashtableLookupParameter, SCHEMA_CUR);
} // namespace lite
} // namespace mindspore

View File

@ -42,8 +42,6 @@ OpParameter *PopulateLocalResponseNormParameter(const void *prim) {
param->beta_ = value->beta();
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_LRN, PopulateLocalResponseNormParameter, SCHEMA_CUR);
} // namespace lite
} // namespace mindspore

View File

@ -39,7 +39,6 @@ OpParameter *PopulateLshProjectionParameter(const void *prim) {
param->lsh_type_ = value->type();
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_LshProjection, PopulateLshProjectionParameter, SCHEMA_CUR);
} // namespace lite
} // namespace mindspore

View File

@ -42,7 +42,6 @@ OpParameter *PopulateMatMulParameter(const void *prim) {
param->act_type_ = ActType_No;
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_MatMul, PopulateMatMulParameter, SCHEMA_CUR)
} // namespace lite
} // namespace mindspore

View File

@ -40,6 +40,5 @@ OpParameter *PopulateNonMaxSuppressionParameter(const void *prim) {
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_NonMaxSuppression, PopulateNonMaxSuppressionParameter, SCHEMA_CUR);
} // namespace lite
} // namespace mindspore

View File

@ -39,7 +39,6 @@ OpParameter *PopulateOneHotParameter(const void *prim) {
param->axis_ = value->axis();
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_OneHot, PopulateOneHotParameter, SCHEMA_CUR)
} // namespace lite
} // namespace mindspore

View File

@ -39,7 +39,6 @@ OpParameter *PopulatePReLUParameter(const void *prim) {
param->channelShared = value->channel_shared();
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_PReLUFusion, PopulatePReLUParameter, SCHEMA_CUR)
} // namespace lite
} // namespace mindspore

View File

@ -40,7 +40,6 @@ OpParameter *PopulatePadParameter(const void *prim) {
param->constant_value_ = value->constant_value();
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_PadFusion, PopulatePadParameter, SCHEMA_CUR)
} // namespace lite
} // namespace mindspore

View File

@ -100,7 +100,6 @@ OpParameter *PopulatePriorBoxParameter(const void *prim) {
param->step_w = value->step_w();
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_PriorBox, PopulatePriorBoxParameter, SCHEMA_CUR)
} // namespace lite
} // namespace mindspore

View File

@ -40,7 +40,6 @@ OpParameter *PopulateQuantDTypeCastParameter(const void *prim) {
param->dstT = value->dst_t();
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_QuantDTypeCast, PopulateQuantDTypeCastParameter, SCHEMA_CUR);
} // namespace lite
} // namespace mindspore

View File

@ -42,7 +42,6 @@ OpParameter *PopulateRangeParameter(const void *prim) {
param->dType_ = value->d_type();
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_Range, PopulateRangeParameter, SCHEMA_CUR)
} // namespace lite
} // namespace mindspore

View File

@ -42,7 +42,6 @@ OpParameter *PopulateReduceParameter(const void *prim) {
param->mode_ = static_cast<int>(value->mode());
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_ReduceFusion, PopulateReduceParameter, SCHEMA_CUR)
} // namespace lite
} // namespace mindspore

View File

@ -44,7 +44,6 @@ OpParameter *PopulateResizeParameter(const void *prim) {
param->cubic_coeff_ = value->cubic_coeff();
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_Resize, PopulateResizeParameter, SCHEMA_CUR)
} // namespace lite
} // namespace mindspore

View File

@ -49,7 +49,6 @@ OpParameter *PopulateReverseParameter(const void *prim) {
}
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_ReverseV2, PopulateReverseParameter, SCHEMA_CUR)
} // namespace lite
} // namespace mindspore

View File

@ -40,7 +40,6 @@ OpParameter *PopulateReverseSequenceParameter(const void *prim) {
param->batch_axis_ = static_cast<int>(value->batch_dim());
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_ReverseSequence, PopulateReverseSequenceParameter, SCHEMA_CUR);
} // namespace lite
} // namespace mindspore

View File

@ -32,7 +32,6 @@ OpParameter *PopulateScatterNDParameter(const void *prim) {
param->type_ = primitive->value_type();
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_ScatterNd, PopulateScatterNDParameter, SCHEMA_CUR)
} // namespace lite
} // namespace mindspore

View File

@ -41,7 +41,6 @@ OpParameter *PopulateSkipGramParameter(const void *prim) {
param->include_all_ngrams = value->include_all_grams();
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_SkipGram, PopulateSkipGramParameter, SCHEMA_CUR)
} // namespace lite
} // namespace mindspore

View File

@ -50,7 +50,6 @@ OpParameter *PopulateSliceParameter(const void *prim) {
}
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_SliceFusion, PopulateSliceParameter, SCHEMA_CUR)
} // namespace lite
} // namespace mindspore

View File

@ -43,7 +43,6 @@ OpParameter *PopulateStridedSliceGradParameter(const void *prim) {
param->shrinkAxisMask_ = value->shrink_axis_mask();
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_StridedSliceGrad, PopulateStridedSliceGradParameter, SCHEMA_CUR);
} // namespace lite
} // namespace mindspore

View File

@ -33,7 +33,6 @@ OpParameter *PopulateSubParameter(const void *prim) {
param->op_parameter_.type_ = primitive->value_type();
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_SubFusion, PopulateSubParameter, SCHEMA_CUR)
} // namespace lite
} // namespace mindspore

View File

@ -41,7 +41,6 @@ OpParameter *PopulateTensorListFromTensorParameter(const void *prim) {
param->element_dtype_ = value->element_dtype();
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_TensorListFromTensor, PopulateTensorListFromTensorParameter, SCHEMA_CUR);
} // namespace lite
} // namespace mindspore

View File

@ -39,7 +39,6 @@ OpParameter *PopulateTensorListGetItemParameter(const void *prim) {
param->element_dtype_ = value->element_dtype();
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_TensorListGetItem, PopulateTensorListGetItemParameter, SCHEMA_CUR);
} // namespace lite
} // namespace mindspore

View File

@ -39,7 +39,6 @@ OpParameter *PopulateTensorListReserveParameter(const void *prim) {
param->element_dtype_ = value->element_dtype();
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_TensorListReserve, PopulateTensorListReserveParameter, SCHEMA_CUR);
} // namespace lite
} // namespace mindspore

View File

@ -39,7 +39,6 @@ OpParameter *PopulateTensorListSetItemParameter(const void *prim) {
param->element_dtype_ = value->element_dtype();
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_TensorListSetItem, PopulateTensorListSetItemParameter, SCHEMA_CUR);
} // namespace lite
} // namespace mindspore

View File

@ -40,7 +40,6 @@ OpParameter *PopulateTensorListStackParameter(const void *prim) {
param->num_element_ = value->num_elements();
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_TensorListStack, PopulateTensorListStackParameter, SCHEMA_CUR);
} // namespace lite
} // namespace mindspore

View File

@ -45,7 +45,6 @@ OpParameter *PopulateTileParameter(const void *prim) {
}
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_TileFusion, PopulateTileParameter, SCHEMA_CUR)
} // namespace lite
} // namespace mindspore

View File

@ -39,7 +39,6 @@ OpParameter *PopulateTopKParameter(const void *prim) {
param->sorted_ = value->sorted();
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_TopKFusion, PopulateTopKParameter, SCHEMA_CUR)
} // namespace lite
} // namespace mindspore

View File

@ -33,7 +33,6 @@ OpParameter *PopulateTransposeParameter(const void *prim) {
param->op_parameter_.type_ = primitive->value_type();
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_Transpose, PopulateTransposeParameter, SCHEMA_CUR)
} // namespace lite
} // namespace mindspore

View File

@ -33,7 +33,6 @@ OpParameter *PopulateUniqueParameter(const void *prim) {
param->op_parameter_.type_ = primitive->value_type();
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_Unique, PopulateUniqueParameter, SCHEMA_CUR)
} // namespace lite
} // namespace mindspore

View File

@ -39,7 +39,6 @@ OpParameter *PopulateUnstackParameter(const void *prim) {
param->axis_ = value->axis();
return reinterpret_cast<OpParameter *>(param);
}
REG_POPULATE(PrimitiveType_Unstack, PopulateUnstackParameter, SCHEMA_CUR)
} // namespace lite
} // namespace mindspore

View File

@ -40,8 +40,6 @@ OpParameter *PopulateCommonParameter(const void *prim) {
return common_parameter;
}
} // namespace
Registry g_zerosLikeV0ParameterRegistry(schema::v0::PrimitiveType_ZerosLike, PopulateCommonParameter, SCHEMA_V0);
} // namespace lite
} // namespace mindspore

View File

@ -77,7 +77,6 @@ OpParameter *PopulateDeconvDwParameter(const void *prim) {
return reinterpret_cast<OpParameter *>(conv_param);
}
} // namespace
Registry g_deDepthwiseConv2DV0ParameterRegistry(schema::v0::PrimitiveType_DeDepthwiseConv2D, PopulateDeconvDwParameter,
SCHEMA_V0);
} // namespace lite

View File

@ -41,9 +41,7 @@ OpParameter *PopulateDepthToSpaceParameter(const void *prim) {
return reinterpret_cast<OpParameter *>(depth_space_param);
}
} // namespace
Registry g_depthToSpaceV0ParameterRegistry(schema::v0::PrimitiveType_DepthToSpace, PopulateDepthToSpaceParameter,
SCHEMA_V0);
} // namespace lite
} // namespace mindspore

View File

@ -20,10 +20,11 @@
namespace mindspore {
namespace lite {
namespace {
typedef struct PartialParameter {
struct PartialParameter {
OpParameter op_parameter_;
int sub_graph_index_;
} PartialParameter;
int sub_graph_index_ = 0;
};
using PartialParameter = PartialParameter;
OpParameter *PopulatePartialParameter(const void *prim) {
auto *primitive = static_cast<const schema::v0::Primitive *>(prim);

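Note: this hunk (and the matching WhileParemeter and TYPE_FUNC_INFO hunks further down) swaps a C-style typedef struct for a plain C++ struct with default member initializers. A small self-contained sketch of the before/after shape, with illustrative field names:

// Before: C-style typedef; members are left uninitialized unless memset is called.
typedef struct PartialParameterOld {
  int op_type;
  int sub_graph_index;
} PartialParameterOld;

// After: C++ struct with default member initializers; every instance starts zeroed.
struct PartialParameterNew {
  int op_type = 0;
  int sub_graph_index = 0;
};

int main() {
  PartialParameterNew p;    // members already hold 0 thanks to the initializers
  PartialParameterOld q{};  // the old form needs explicit value-initialization
  return p.sub_graph_index + q.sub_graph_index;  // 0
}
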
View File

@ -40,9 +40,7 @@ OpParameter *PopulateTensorListFromTensorParameter(const void *prim) {
return reinterpret_cast<OpParameter *>(TensorList_param);
}
} // namespace
Registry g_tensorListFromTensorV0ParameterRegistry(schema::v0::PrimitiveType_TensorListFromTensor,
PopulateTensorListFromTensorParameter, SCHEMA_V0);
} // namespace lite
} // namespace mindspore

View File

@ -39,9 +39,7 @@ OpParameter *PopulateTensorListReserveParameter(const void *prim) {
return reinterpret_cast<OpParameter *>(reserve_param);
}
} // namespace
Registry g_tensorListReserveV0ParameterRegistry(schema::v0::PrimitiveType_TensorListReserve,
PopulateTensorListReserveParameter, SCHEMA_V0);
} // namespace lite
} // namespace mindspore

View File

@ -39,9 +39,7 @@ OpParameter *PopulateTensorListSetItemParameter(const void *prim) {
return reinterpret_cast<OpParameter *>(setItem_param);
}
} // namespace
Registry g_tensorListSetItemV0ParameterRegistry(schema::v0::PrimitiveType_TensorListSetItem,
PopulateTensorListSetItemParameter, SCHEMA_V0);
} // namespace lite
} // namespace mindspore

View File

@ -20,12 +20,12 @@
namespace mindspore {
namespace lite {
namespace {
typedef struct WhileParemeter {
struct WhileParemeter {
OpParameter op_parameter_;
int body_subgraph_index;
int cond_subgraph_index;
} WhileParemeter;
int body_subgraph_index = 0;
int cond_subgraph_index = 0;
};
using WhileParemeter = WhileParemeter;
OpParameter *PopulateWhileParameter(const void *prim) {
auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
MS_ASSERT(primitive != nullptr);

View File

@ -18,12 +18,12 @@ using mindspore::schema::PrimitiveType_While;
namespace mindspore {
namespace lite {
typedef struct WhileParemeter {
struct WhileParemeter {
OpParameter op_parameter_;
int body_subgraph_index;
int cond_subgraph_index;
} WhileParemeter;
int body_subgraph_index = 0;
int cond_subgraph_index = 0;
};
using WhileParemeter = WhileParemeter;
OpParameter *PopulateWhileParemeter(const void *prim) {
auto primitive = static_cast<const schema::Primitive *>(prim);
MS_ASSERT(primitive != nullptr);

View File

@ -24,7 +24,7 @@
#include "src/common/file_utils.h"
namespace mindspore::lite {
#define MAX_MODEL_NUM 20
const int max_model_num = 20;
int NPUManager::CompareVersion(const string &version1, const string &version2) {
std::istringstream iss1(version1);
std::istringstream iss2(version2);
@ -172,7 +172,7 @@ int NPUManager::LoadOMModel() {
std::unordered_map<std::shared_ptr<hiai::AiModelBuilder>, hiai::MemBuffer *> builder_buffer_map;
int total = 0;
for (const auto &model_map : models_) {
if (total % MAX_MODEL_NUM == 0) {
if (total % max_model_num == 0) {
client = CreateAiModelMngerClient();
if (client == nullptr) {
MS_LOG(ERROR) << "Create Client failed.";
@ -198,7 +198,7 @@ int NPUManager::LoadOMModel() {
}
builder_buffer_map.insert({mc_builder, buffer});
model->desc_->SetModelBuffer(buffer->GetMemBufferData(), buffer->GetMemBufferSize());
if (models_desc.size() == MAX_MODEL_NUM) {
if (models_desc.size() == max_model_num) {
auto ret = LoadModel(client, models_desc);
if (ret != RET_ERROR) {
MS_LOG(ERROR) << "Client load model failed.";

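Note: replacing the MAX_MODEL_NUM macro with a named constant gives the limit a type and a scope; the diff uses `const int max_model_num = 20;`, and a constexpr variant behaves the same way in the modulo checks above. A minimal sketch (the client-creation logic is only hinted at, not the real NPUManager code):

#include <iostream>

// Typed, scoped constant instead of "#define MAX_MODEL_NUM 20".
constexpr int kMaxModelNum = 20;

int main() {
  int clients_created = 0;
  for (int total = 0; total < 45; ++total) {
    if (total % kMaxModelNum == 0) {
      ++clients_created;  // stands in for CreateAiModelMngerClient()
    }
  }
  std::cout << "clients created: " << clients_created << "\n";  // prints 3 (at 0, 20, 40)
  return 0;
}
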
View File

@ -21,7 +21,7 @@
namespace mindspore::lite {
using kernel::KERNEL_ARCH::kNPU;
enum InsertState { InsertNone, PreInsert, PostInsert, BothInsert };
enum class InsertState : int { InsertNone, PreInsert, PostInsert, BothInsert };
std::set<mindspore::schema::PrimitiveType> npu_insert_nodes = {
schema::PrimitiveType_Concat, schema::PrimitiveType_AddFusion, schema::PrimitiveType_Eltwise,
schema::PrimitiveType_Activation, schema::PrimitiveType_Split, schema::PrimitiveType_PadFusion,
@ -31,7 +31,6 @@ std::set<mindspore::schema::PrimitiveType> npu_insert_nodes = {
// by inserting nchw2nhwc or nhwc2nchw before or after the operator (e.g. concat, add, etc..) together with
// fusion pass. If transpose inserted are more than half of input output, we will insert remaining input
// output with transpose and hopefully do a fusion pass. Otherwise, we don't insert anything.
//
// Typically concat accept output from nchw2nhwc, we fill other input with nh2nc and nc2nh so that inputs to concat are
// format same and then fusion all nchw2nhwc op.
// e.g.
@ -39,9 +38,7 @@ std::set<mindspore::schema::PrimitiveType> npu_insert_nodes = {
// current pass (conv->nchw2nhwc, add->nhwc2nchw->nchw2nhwc) -> concat -> (nhwc2nchw->conv)
// fusion pass (conv, add->nhwc2nchw) -> concat -> conv
// original 2 cpusubgraph, after 2 pass, only 1 cpu subgraph
//
// node:
// Such ops require inputs all have same format, could be nchw or nhwc or other format.
// note: Such ops require inputs all have same format, could be nchw or nhwc or other format.
// Their inputs outputs may not be 4d, or are already format ok,
// so we won't insert nc2nh or nh2nc when op's in kernels and out kernels contains no nc2nh or nh2nc.
// This pass should be run after npu_transform_pass, which insert transpose for nchw-input-limited op like conv2d.
@ -49,7 +46,7 @@ std::set<mindspore::schema::PrimitiveType> npu_insert_nodes = {
int NPUInsertTransformPass::GetInsertState(kernel::LiteKernel *kernel) {
// filter out irrelevant kernel
if (npu_insert_nodes.find(kernel->type()) == npu_insert_nodes.end()) {
return InsertNone;
return static_cast<int>(InsertState::InsertNone);
}
// current kernel is target kernel
@ -87,20 +84,20 @@ int NPUInsertTransformPass::GetInsertState(kernel::LiteKernel *kernel) {
size_t transpose_tensor_num = transpose_input_num + transpose_output_num;
if (transpose_tensor_num == 0 || transpose_tensor_num * 2 < in_out_tensor_num ||
transpose_tensor_num == in_out_tensor_num) {
return InsertNone;
return static_cast<int>(InsertState::InsertNone);
}
InsertState ret;
if (need_pre_insert && !need_post_insert) {
ret = PreInsert;
ret = InsertState::PreInsert;
} else if (need_pre_insert && need_post_insert) {
ret = BothInsert;
ret = InsertState::BothInsert;
} else if (!need_pre_insert && need_post_insert) {
ret = PostInsert;
ret = InsertState::PostInsert;
} else {
ret = InsertNone;
ret = InsertState::InsertNone;
}
return ret;
return static_cast<int>(ret);
}
int NPUInsertTransformPass::InsertNode(kernel::LiteKernel *kernel, kernel::LiteKernel *post_kernel,
@ -274,7 +271,7 @@ int NPUInsertTransformPass::Run() {
// If the every output kernel is nhwc2nchw, insert
// modify loop index add post_kernels.size() to the next kernel in the origin vector
switch (insert_state) {
case PreInsert: {
case static_cast<int>(InsertState::PreInsert): {
auto ret = InsertPreNodes(kernel, &insert_kernels);
if (ret != RET_OK) {
MS_LOG(ERROR) << "Insert nhwc2nchw kernel and nchw2nhwc kernel before kernel " << kernel->name()
@ -285,7 +282,7 @@ int NPUInsertTransformPass::Run() {
i += insert_kernels.size();
break;
}
case PostInsert: {
case static_cast<int>(InsertState::PostInsert): {
auto ret = InsertPostNodes(kernel, &insert_kernels);
if (ret != RET_OK) {
MS_LOG(ERROR) << "Insert nhwc2nchw kernel and nchw2nhwc kernel after kernel " << kernel->name()
@ -296,7 +293,7 @@ int NPUInsertTransformPass::Run() {
i += insert_kernels.size();
break;
}
case BothInsert: {
case static_cast<int>(InsertState::BothInsert): {
auto ret = InsertPreNodes(kernel, &insert_kernels);
if (ret != RET_OK) {
MS_LOG(ERROR) << "Insert nhwc2nchw kernel and nchw2nhwc kernel before kernel " << kernel->name()

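Note: making InsertState an enum class removes the implicit conversion to int, which is why GetInsertState and the switch in Run now go through static_cast<int>(...). A compact sketch of the difference with a toy enum:

#include <iostream>

enum UnscopedState { kNone, kPre };                   // enumerators leak into the enclosing scope
enum class ScopedState : int { kNone, kPre, kPost };  // written ScopedState::..., no implicit int

int AsInt(ScopedState s) {
  return static_cast<int>(s);  // explicit conversion required, as in the pass above
}

int main() {
  int a = kPre;                        // fine: unscoped enumerators convert implicitly
  int b = AsInt(ScopedState::kPost);   // scoped enumerators need the cast
  std::cout << a << " " << b << "\n";  // prints "1 2"
  return 0;
}
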
View File

@ -18,7 +18,6 @@
#include "include/errorcode.h"
#include "src/common/log_adapter.h"
namespace mindspore::lite {
void NPUPassManager::AddPass(NPUBasePass *pass) { all_pass_.push_back(pass); }
int NPUPassManager::Run() {
for (auto pass : all_pass_) {

View File

@ -104,8 +104,7 @@ int SubGraphNpuKernel::BuildNPUInputOp() {
auto in_tensor = node->in_tensors()[i];
if (IsSubGraphInputTensor(in_tensor)) {
auto tensor_name = node->name() + "_" + std::to_string(count++);
hiai::op::Data *data;
data = mindspore::lite::ConverterToNPUData(in_tensor, tensor_name);
hiai::op::Data *data = mindspore::lite::ConverterToNPUData(in_tensor, tensor_name);
subgraph_input_op_.push_back(*data);
node_input_op.push_back(data);
op_buffer_.push_back(data);
@ -206,14 +205,10 @@ int SubGraphNpuKernel::Init() {
MS_LOG(ERROR) << "Build IR model failed.";
return RET_ERROR;
}
MS_ASSERT(npu_manager_ != nullptr);
npu_manager_->AddModel(model_buffer_data, GetOMModelName(),
static_cast<const lite::InnerContext *>(this->Context())->GetNpuInfo().frequency_);
executor_ = new (std::nothrow) mindspore::lite::NPUExecutor(GetOMModelName(), npu_manager_);
if (executor_ == nullptr) {
MS_LOG(ERROR) << "Create NPUExecutor failed.";
return RET_ERROR;

View File

@ -65,20 +65,17 @@ int KernelInferShape(const std::vector<lite::Tensor *> &inputs, const std::vecto
std::vector<TensorC *> in_tensors;
std::vector<TensorC *> out_tensors;
int ret = 0;
ret = GenerateInTensorC(parameter, inputs, outputs, &in_tensors);
if (ret != RET_OK) {
FreeAllTensorC(&in_tensors);
return RET_ERROR;
}
ret = GenerateOutTensorC(parameter, inputs, outputs, &out_tensors);
if (ret != RET_OK) {
FreeAllTensorC(&in_tensors);
FreeAllTensorC(&out_tensors);
return RET_ERROR;
}
auto infer_shape_func = GetInferFunc(parameter->type_);
if (infer_shape_func == nullptr) {
MS_LOG(ERROR) << "Get infershape func failed! type:" << PrimitiveCurVersionTypeName(parameter->type_);
@ -86,7 +83,6 @@ int KernelInferShape(const std::vector<lite::Tensor *> &inputs, const std::vecto
}
ret = infer_shape_func(static_cast<TensorC **>(in_tensors.data()), in_tensors.size(), out_tensors.data(),
out_tensors.size(), parameter);
for (size_t i = 0; i < out_tensors.size(); i++) {
if (out_tensors.at(i) == nullptr) {
continue;
@ -108,7 +104,6 @@ int KernelInferShape(const std::vector<lite::Tensor *> &inputs, const std::vecto
outputs.at(i)->set_shape({-1});
}
}
FreeAllTensorC(&in_tensors);
FreeAllTensorC(&out_tensors);
if (ret == NNACL_INFER_INVALID) {

View File

@ -15,7 +15,7 @@
*/
#include "src/runtime/kernel/arm/base/convolution_base.h"
#include <float.h>
#include <cfloat>
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "include/errorcode.h"

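Note: <cfloat> and <cstring> (used in later hunks too) are the C++ spellings of <float.h> and <string.h>; the macros are unchanged and the functions are guaranteed to appear in namespace std. A tiny sketch:

#include <cfloat>   // C++ form of <float.h>; FLT_MAX and friends are still macros
#include <cstring>  // C++ form of <string.h>; declares std::memset, std::strlen, ...
#include <iostream>

int main() {
  char buf[8];
  std::memset(buf, 0, sizeof(buf));  // qualify with std:: when using <cstring>
  std::cout << "largest float: " << FLT_MAX << "\n";
  return static_cast<int>(std::strlen(buf));  // 0, the buffer was just zeroed
}
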
View File

@ -26,7 +26,6 @@ using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_RandomStandardNormal;
namespace mindspore::kernel {
int RandomStandardNormalCPUKernel::Init() { return RET_OK; }
int RandomStandardNormalCPUKernel::ReSize() { return RET_OK; }
@ -51,6 +50,5 @@ int RandomStandardNormalCPUKernel::Run() {
}
return RET_OK;
}
REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_RandomStandardNormal, LiteKernelCreator<RandomStandardNormalCPUKernel>)
} // namespace mindspore::kernel

View File

@ -63,9 +63,9 @@ int ReduceBaseCPUKernel::CheckParameters() {
}
for (auto i = 0; i < num_axes_; i++) {
if (axes_[i] < -static_cast<int>(input_rank) || axes_[i] >= static_cast<int>(input_rank)) {
if (axes_[i] < -(static_cast<int>(input_rank)) || axes_[i] >= static_cast<int>(input_rank)) {
MS_LOG(ERROR) << "Reduce got invalid axis " << axes_[i] << ", axis should be in ["
<< -static_cast<int>(input_rank) << ", " << input_rank - 1 << "].";
<< -(static_cast<int>(input_rank)) << ", " << input_rank - 1 << "].";
return RET_ERROR;
}
if (axes_[i] < 0) {

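Note: the range check above enforces the usual reduce-axis convention that an axis must lie in [-rank, rank-1], with negative axes wrapped by adding the rank (the `if (axes_[i] < 0)` branch). A tiny self-contained sketch of that normalization, separate from the kernel code:

#include <cstdio>

// Returns the normalized axis in [0, rank-1], or -1 if the axis is out of range.
int NormalizeAxis(int axis, int rank) {
  if (axis < -rank || axis >= rank) {
    return -1;  // mirrors the RET_ERROR branch in ReduceBaseCPUKernel::CheckParameters
  }
  return axis < 0 ? axis + rank : axis;
}

int main() {
  std::printf("%d %d %d\n", NormalizeAxis(-1, 4), NormalizeAxis(3, 4), NormalizeAxis(4, 4));
  // prints "3 3 -1"
  return 0;
}
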
View File

@ -28,7 +28,6 @@ using mindspore::lite::RET_NULL_PTR;
using mindspore::lite::RET_OK;
namespace mindspore::kernel {
int SoftmaxBaseCPUKernel::Init() {
if (softmax_param_ == nullptr) {
MS_LOG(ERROR) << "SoftmaxParameter nullptr";

View File

@ -26,7 +26,6 @@ using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_TensorListFromTensor;
namespace mindspore::kernel {
int TensorListFromTensorCPUKernel::IsCompatibleShape() {
if (input1_->data_type() != kNumberTypeInt && input1_->data_type() != kNumberTypeInt32) { // element_shape
MS_LOG(ERROR) << "in_tensors_[1] data type is must be int";
@ -35,7 +34,7 @@ int TensorListFromTensorCPUKernel::IsCompatibleShape() {
int in1_ele_num = input1_->ElementsNum();
std::vector<int> tensor_shape = input0_->shape();
if (static_cast<int>(tensor_shape.size() - 1) != in1_ele_num) {
MS_LOG(ERROR) << "in_tensors_[0].shape().size() - 1:" << tensor_shape.size() - 1
MS_LOG(ERROR) << "in_tensors_[0].shape().size() - 1:" << (tensor_shape.size() - 1)
<< " must be equal in_tensors_[1].ElementsNum():" << in1_ele_num;
return RET_ERROR;
}
@ -44,7 +43,7 @@ int TensorListFromTensorCPUKernel::IsCompatibleShape() {
int dim0 = tensor_shape[i + 1];
int dim1 = elements_shape[i];
if (dim0 >= 0 && dim1 >= 0 && dim0 != dim1) {
MS_LOG(ERROR) << "input0_->shape()[" << i + 1 << "]:" << dim0 << " is not equal input1_->data_c()[" << i
MS_LOG(ERROR) << "input0_->shape()[" << (i + 1) << "]:" << dim0 << " is not equal input1_->data_c()[" << i
<< "]:" << dim1;
return RET_ERROR;
}

View File

@ -27,7 +27,6 @@ using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_TensorListGetItem;
namespace mindspore::kernel {
int TensorListGetItemCPUKernel::Init() { return RET_OK; }
int TensorListGetItemCPUKernel::Run() {

View File

@ -26,7 +26,6 @@ using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_TensorListReserve;
namespace mindspore::kernel {
int TensorListReserveCPUKernel::Init() { return RET_OK; }
int TensorListReserveCPUKernel::Run() {

View File

@ -27,7 +27,6 @@ using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_TensorListSetItem;
namespace mindspore::kernel {
int TensorListSetItemCPUKernel::Init() { return RET_OK; }
int TensorListSetItemCPUKernel::CheckParam() {

View File

@ -29,7 +29,6 @@ using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_TensorListStack;
namespace mindspore::kernel {
int TensorListStackCPUKernel::CheckParam() {
if (num_element_ != -1 && input0_->ElementsNum() != num_element_) {
MS_LOG(ERROR) << "in_tensors_[0].ElementsNum():[" << input0_->ElementsNum() << "] must be equal "

View File

@ -24,10 +24,11 @@ using mindspore::lite::RET_OK;
namespace mindspore::kernel {
namespace {
typedef struct {
int primitive_type_;
ArithmeticSelfFp16Func func_;
} TYPE_FUNC_INFO;
struct TYPE_FUNC_INFO {
int primitive_type_ = 0;
ArithmeticSelfFp16Func func_ = nullptr;
};
using TYPE_FUNC_INFO = TYPE_FUNC_INFO;
} // namespace
ArithmeticSelfFp16Func ArithmeticSelfFp16CPUKernel::GetArithmeticSelfFp16Fun(int primitive_type) {

View File

@ -28,7 +28,6 @@ using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_BiasAdd;
namespace mindspore::kernel {
int BiasAddCPUFp16Kernel::ReSize() {
auto dims = in_tensors_.at(0)->shape();
bias_param_->ndim_ = dims.size();

View File

@ -109,7 +109,6 @@ int ConvolutionDepthwiseFp16CPUKernel::Run() {
if (ret != RET_OK) {
MS_LOG(ERROR) << "ConvDwFp16Run error: error_code[" << ret << "]";
}
return ret;
}

View File

@ -175,7 +175,6 @@ int DeConvWinogradFp16CPUKernel::InitParameter() {
if (unit.tmp_buffer_ == nullptr) {
return RET_NULL_PTR;
}
} else {
unit.tmp_buffer_ = malloc(deconv_param_->thread_num_ * deconv_param_->oc_div4_ * unit.w_size_ * unit.h_size_ *
DECONV_WINOGRAD_DEFAULT_TILE * C4NUM * sizeof(float16_t));
@ -184,7 +183,6 @@ int DeConvWinogradFp16CPUKernel::InitParameter() {
}
}
}
return RET_OK;
}
@ -315,7 +313,7 @@ int DeConvWinogradFp16CPUKernel::InitComputeParam() {
}
int DeConvWinogradFp16CPUKernel::InitDataParam() {
/* unit data : weight & winograd data*/
/* unit data : weight & winograd data */
auto weight_tensor = in_tensors_.at(kWeightIndex);
auto origin_weight = reinterpret_cast<float16_t *>(weight_tensor->data_c());
@ -395,7 +393,7 @@ int DeConvWinogradFp16CPUKernel::Run() {
static_cast<const lite::InnerContext *>(this->context_)
->thread_pool_->ParallelLaunch(DeConvWgFp16Run, this, deconv_param_->thread_num_);
/*post bias activate and nhwc */
// post bias activate and nhwc
static_cast<const lite::InnerContext *>(this->context_)
->thread_pool_->ParallelLaunch(DeConvWgPostFp16Run, this, thread_num_hw_);
}

View File

@ -311,5 +311,4 @@ int MatmulBaseFP16CPUKernel::Run() {
}
return RET_OK;
}
} // namespace mindspore::kernel

View File

@ -38,7 +38,6 @@ using mindspore::schema::ReduceMode_ReduceSum;
using mindspore::schema::ReduceMode_ReduceSumSquare;
namespace mindspore::kernel {
int ReduceFp16CPUKernel::Init() {
auto ret = ReduceBaseCPUKernel::Init();
if (ret != RET_OK) {

View File

@ -15,7 +15,7 @@
*/
#include "src/runtime/kernel/arm/fp16/scale_fp16.h"
#include <string.h>
#include <cstring>
#include <vector>
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
@ -31,7 +31,6 @@ using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_ScaleFusion;
namespace mindspore::kernel {
int ScaleFp16CPUKernel::InitScaleOffset() {
auto scale_tensor = in_tensors_.at(1);
malloc_scale_ = scale_tensor->data_type() == kNumberTypeFloat32;

View File

@ -14,7 +14,7 @@
* limitations under the License.
*/
#include <string.h>
#include <cstring>
#include <vector>
#include "src/runtime/kernel/arm/fp16/softmax_fp16.h"
#include "src/runtime/kernel/arm/fp16/common_fp16.h"

View File

@ -23,10 +23,11 @@ using mindspore::lite::RET_OK;
namespace mindspore::kernel {
namespace {
typedef struct {
int primitive_type_;
ArithmeticSelfFunc func_;
} TYPE_FUNC_INFO;
struct TYPE_FUNC_INFO {
int primitive_type_ = 0;
ArithmeticSelfFunc func_ = nullptr;
};
using TYPE_FUNC_INFO = TYPE_FUNC_INFO;
} // namespace
ArithmeticSelfFunc ArithmeticSelfCPUKernel::GetArithmeticSelfFun(int primitive_type) {

View File

@ -156,7 +156,7 @@ int Convolution1x1CPUKernel::Init() {
}
void Convolution1x1CPUKernel::PackMatmulInput(const float *src_ptr, float *dst_ptr, int row, int col) {
#if ENABLE_AVX
#ifdef ENABLE_AVX
RowMajor2Col6Major(src_ptr, dst_ptr, row, col);
#elif defined(ENABLE_SSE)
RowMajor2Col4Major(src_ptr, dst_ptr, row, col);
@ -290,5 +290,4 @@ int Convolution1x1CPUKernel::Eval() {
}
return RET_OK;
}
} // namespace mindspore::kernel

View File

@ -225,5 +225,4 @@ int ConvolutionDepthwiseIndirectCPUKernel::Eval() {
}
return RET_OK;
}
} // namespace mindspore::kernel

View File

@ -200,5 +200,4 @@ int ConvolutionDepthwiseSWCPUKernel::Eval() {
}
return RET_OK;
}
} // namespace mindspore::kernel

View File

@ -30,7 +30,7 @@ using mindspore::lite::RET_OK;
namespace mindspore::kernel {
#ifdef ENABLE_AVX
#define OC_BLOCK C16NUM
#elif ENABLE_ARM32
#elif defined(ENABLE_ARM32)
#define OC_BLOCK C4NUM
#else
#define OC_BLOCK C8NUM
@ -54,7 +54,7 @@ int ConvolutionCPUKernel::InitWeightBias() {
memset(packed_weight_, 0, pack_weight_size * sizeof(float));
#ifdef ENABLE_AVX
RowMajor2Col16Major(origin_weight_, packed_weight_, out_channel, in_channel * kernel_plane);
#elif ENABLE_ARM32
#elif defined(ENABLE_ARM32)
RowMajor2Col4Major(origin_weight_, packed_weight_, out_channel, in_channel * kernel_plane);
#else
RowMajor2Col8Major(origin_weight_, packed_weight_, out_channel, in_channel * kernel_plane);
@ -80,7 +80,7 @@ int ConvolutionCPUKernel::InitTmpBuffer() {
#ifdef ENABLE_AVX
int unit_size = conv_param_->kernel_h_ * conv_param_->kernel_w_ * conv_param_->input_channel_ * C6NUM * thread_count_;
#elif ENABLE_SSE
#elif defined(ENABLE_SSE)
int unit_size = conv_param_->kernel_h_ * conv_param_->kernel_w_ * conv_param_->input_channel_ * C4NUM * thread_count_;
#else
int unit_size =
@ -173,7 +173,7 @@ void ConvolutionCPUKernel::PackWeight() {
memset(packed_weight_, 0, pack_weight_size * sizeof(float));
#ifdef ENABLE_AVX
RowMajor2Col16Major(origin_weight, packed_weight_, out_channel, in_channel * kernel_plane);
#elif ENABLE_ARM32
#elif defined(ENABLE_ARM32)
RowMajor2Col4Major(origin_weight, packed_weight_, out_channel, in_channel * kernel_plane);
#else
RowMajor2Col8Major(origin_weight, packed_weight_, out_channel, in_channel * kernel_plane);

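Note: the switch from `#if ENABLE_AVX` / `#elif ENABLE_ARM32` to `#ifdef` / `#elif defined(...)` in this and the previous file matters when a macro is defined without a value: `defined(X)` only asks whether X exists, while using X in the condition expands it, and an empty expansion leaves an ill-formed #if expression. A small sketch with a stand-in macro name:

#include <iostream>

#define ENABLE_DEMO  // defined, but deliberately given no value

#if defined(ENABLE_DEMO)  // true: only tests whether the macro is defined
constexpr const char *kMode = "demo path";
#else
constexpr const char *kMode = "default path";
#endif
// By contrast, "#if ENABLE_DEMO" would expand to an empty expression here and fail to compile.

int main() {
  std::cout << kMode << "\n";  // prints "demo path"
  return 0;
}
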
View File

@ -236,5 +236,4 @@ int ConvolutionWinogradCPUKernel::Eval() {
}
return RET_OK;
}
} // namespace mindspore::kernel

View File

@ -160,7 +160,6 @@ int DeConvolutionWinogradCPUKernel::InitParameter() {
MS_LOG(ERROR) << "tmp_buffer_ error!";
return RET_NULL_PTR;
}
} else {
unit.tmp_buffer_ = malloc(deconv_param_->thread_num_ * deconv_param_->oc_div4_ * unit.w_size_ * unit.h_size_ *
DECONV_WINOGRAD_DEFAULT_TILE * C4NUM * sizeof(float));
@ -170,7 +169,6 @@ int DeConvolutionWinogradCPUKernel::InitParameter() {
}
}
}
return RET_OK;
}
@ -270,7 +268,7 @@ int DeConvolutionWinogradCPUKernel::InitDataParam() {
auto weight_tensor = in_tensors_.at(kWeightIndex);
float *nhwc_weight = reinterpret_cast<float *>(weight_tensor->data_c());
/* unit data : weight & winograd data*/
/* unit data : weight & winograd data */
for (int i = 0; i < deconv_param_->compute_size_; i++) {
DeConvComputeUnit *unit = &deconv_param_->compute_units_[i];
int ret = PackDeConvWgDataFp32(nhwc_weight, unit, conv_param_, deconv_param_);
@ -414,7 +412,7 @@ int DeConvolutionWinogradCPUKernel::Run() {
static_cast<const lite::InnerContext *>(this->context_)
->thread_pool_->ParallelLaunch(DeConvWgFp32Run, this, deconv_param_->thread_num_);
/*post bias activate and nhwc */
/* post bias activate and nhwc */
static_cast<const lite::InnerContext *>(this->context_)
->thread_pool_->ParallelLaunch(DeConvWgPostFp32Run, this, thread_num_hw_);
}

View File

@ -51,7 +51,6 @@ int FusedBatchnormCPUKernel::InitConstTensor() {
offset_ = malloc(offset->Size());
mean_ = malloc(mean->Size());
variance_ = malloc(variance->Size());
if (scale_ == nullptr || offset_ == nullptr || mean_ == nullptr || variance_ == nullptr) {
FreeMeanAndVariance();
FreeScaleAndOffset();
@ -62,7 +61,6 @@ int FusedBatchnormCPUKernel::InitConstTensor() {
memcpy(offset_, offset->MutableData(), offset->Size());
memcpy(mean_, mean->MutableData(), mean->Size());
memcpy(variance_, variance->MutableData(), variance->Size());
return RET_OK;
}
@ -98,7 +96,6 @@ int FusedBatchnormCPUKernel::Run() {
if (ret != RET_OK) {
MS_LOG(ERROR) << "BatchnormRun error error_code[" << ret << "]";
}
return ret;
}