!4654 ReviewBotCheck

Merge pull request !4654 from gongdaguo/ReviewBotCheck
Commit 250ebbc96c, authored by mindspore-ci-bot on 2020-08-18 14:17:23 +08:00, committed by Gitee.
72 changed files with 39 additions and 98 deletions

@ -122,7 +122,6 @@ int DeConv2D::InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vecto
pad_d_ = GetPadDown();
pad_r_ = GetPadRight();
auto pad_mode = (schema::PadMode)GetPadMode();
if (pad_mode == schema::PadMode_CAFFE) {
output_h = (input_h - 1) * stride_h + ((kernel_h - 1) * dilate_h + 1) - pad_u_ - pad_d_;
output_w = (input_w - 1) * stride_w + ((kernel_w - 1) * dilate_w + 1) - pad_l_ - pad_r_;
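
For reference, the two lines above are the standard transposed-convolution output-size formula. A minimal standalone C++ sketch of the same arithmetic, with illustrative values not taken from this PR:

#include <cstdio>

// out = (in - 1) * stride + ((kernel - 1) * dilation + 1) - pad_begin - pad_end,
// matching the PadMode_CAFFE branch above.
int DeconvOutputSize(int in, int stride, int kernel, int dilation, int pad_begin, int pad_end) {
  return (in - 1) * stride + ((kernel - 1) * dilation + 1) - pad_begin - pad_end;
}

int main() {
  // 4x4 input, stride 2, 3x3 kernel, dilation 1, symmetric padding 1:
  // (4 - 1) * 2 + 3 - 2 = 7
  printf("%d\n", DeconvOutputSize(4, 2, 3, 1, 1, 1));  // prints 7
  return 0;
}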

@ -58,5 +58,4 @@ int ScatterND::InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vect
return 0;
}
} // namespace mindspore

@ -20,7 +20,6 @@ namespace mindspore {
namespace {
constexpr int kShapeInputNum = 1;
constexpr int kShapeOutputNum = 1;
} // namespace
int Shape::InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) {
if (inputs_.size() != kShapeInputNum) {

@ -161,6 +161,5 @@ void CompareOutput(float *output_data, std::string file_path) {
// }
// return "/data/data/" + packageName + '/';
//}
} // namespace lite
} // namespace mindspore

@ -22,7 +22,6 @@
namespace mindspore {
namespace lite {
int CompareRelativeOutput(float *output_data, std::string file_path);
}
} // namespace mindspore
#endif // MINDSPORE_LITE_COMMON_FILE_UTILS_EXT_H_

@ -75,7 +75,6 @@ std::vector<size_t> GetGraphOutputNodes(const schema::MetaGraph *meta_graph) {
// std::unordered_set<NODE_ID> OpNode::GetAllInEdges() { return inEdges; }
//
// std::unordered_set<NODE_ID> OpNode::GetAllOutEdges() { return outEdges; }
} // namespace lite
} // namespace mindspore

@ -82,7 +82,6 @@ int OpGraph<NODE_T>::Build(const schema::MetaGraph *subGraphDef) {
return RET_ERROR;
}
auto opDefs = subGraphDef->nodes();
uint32_t opCount = opDefs->size();
@ -104,7 +103,7 @@ int OpGraph<NODE_T>::Build(const schema::MetaGraph *subGraphDef) {
}
template <typename NODE_T>
int OpGraph<NODE_T>::AddEdge(const schema::CNode *srcNodeDef,
-const flatbuffers::Vector<flatbuffers::Offset<schema::CNode>> *nodeDefs) {
+const flatbuffers::Vector<flatbuffers::Offset<schema::CNode>> *nodeDefs) {
MS_ASSERT(srcNodeDef != nullptr);
MS_ASSERT(nodeDefs != nullptr);
NODE_ID srcId = std::string(srcNodeDef->name()->c_str());
@ -242,7 +241,6 @@ OpGraph<NODE_T>::~OpGraph() {
}
nodes.clear();
}
} // namespace lite
} // namespace mindspore

@ -146,6 +146,5 @@ std::vector<AnfNodePtr> DeepUsedGraphSearch(const AnfNodePtr &root, const Includ
std::vector<AnfNodePtr> DeepLinkedGraphSearch(const AnfNodePtr &root, const IncludeFunc &include) {
return DeepLinkedGraphSearcher(include).Search(root);
}
} // namespace mindspore

@ -118,7 +118,7 @@ if (IsPrint(log_level_)) {
// #ifdef USE_ANDROID_LOG
#ifdef ENABLE_ARM
__android_log_print(GetAndroidLogLevel(log_level_), ANDROID_LOG_TAG, "[%s:%d] %s] %s", location_.file_,
-location_.line_, location_.func_, msg.str().c_str());
+location_.line_, location_.func_, msg.str().c_str());
#else
printf("%s [%s:%d] %s] %s\n:", EnumStrForMsLogLevel(log_level_), location_.file_, location_.line_, location_.func_,
msg.str().c_str());
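
The hunk above re-indents the fallback print in the logging path. For illustration only, a hypothetical location-tagged macro in the same spirit (LITE_LOG is an invented name, not the MS_LOG implementation):

#include <cstdio>

// Hypothetical sketch: prints level, file:line, and function like the printf above.
// ##__VA_ARGS__ is a GNU/Clang/MSVC extension, assumed available here.
#define LITE_LOG(level, fmt, ...) \
  printf("%s [%s:%d] %s] " fmt "\n", level, __FILE__, __LINE__, __func__, ##__VA_ARGS__)

int main() {
  LITE_LOG("INFO", "loaded %d tensors", 42);
  return 0;
}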

@ -29,7 +29,6 @@
namespace mindspore {
namespace lite {
namespace tensor {
struct QuantArg {
double scale;
int32_t zeroPoint;

@ -362,5 +362,4 @@ session::LiteSession *session::LiteSession::CreateSession(lite::Context *context
}
return session;
}
} // namespace mindspore

@ -33,7 +33,4 @@ void MatrixMultiplyFp16(const float16_t *matrix_a, const float16_t *matrix_b, fl
}
}
}
} // namespace mindspore::kernel

@ -20,7 +20,7 @@
#include "nnacl/errorcode.h"
int DoSplitFp16(float16_t *in_data, float16_t **out_data, const int *input_shape, int offset, int num_unit,
-SplitParameter *split_param) {
+SplitParameter *split_param) {
if (in_data == NULL || out_data == NULL) {
return NNACL_ERR;
}

@ -25,7 +25,7 @@
extern "C" {
#endif
int DoSplitFp16(float16_t *in_data, float16_t **out_data, const int *input_shape, int offset, int num_unit,
-SplitParameter *split_param);
+SplitParameter *split_param);
#ifdef __cplusplus
}
#endif

@ -36,7 +36,7 @@ int32x4_t ClacScaledInput(int32x4_t input, int32x4_t left_shift_result_vec, int3
}
int16x4_t AddClacSumHalfWord(int32x4_t scaled_input0, int32x4_t scaled_input1, int32x4_t left_shift_out_vec,
-int32x4_t output_multiplier_vec, AddQuantParameter *para) {
+int32x4_t output_multiplier_vec, AddQuantParameter *para) {
int32x4_t raw_sum = vaddq_s32(scaled_input0, scaled_input1);
raw_sum = RoundingDivideByPOTInt32x4(vqrdmulhq_s32(vmulq_s32(raw_sum, left_shift_out_vec), output_multiplier_vec),

@ -25,7 +25,7 @@
#ifdef ENABLE_NEON
int16x4_t ClacSumHalfWordMul(int32x4_t scaled_input0, int32x4_t scaled_input1, int32x4_t left_shift_out_vec,
-int32x4_t output_multiplier_vec, MulQuantArg para) {
+int32x4_t output_multiplier_vec, MulQuantArg para) {
int32x4_t input_scale = vmulq_s32(scaled_input0, scaled_input1);
int32x4_t raw_sum = RoundingDivideByPOTInt32x4(
SaturatingRoundingDoublingHighMulInt32x4(vmulq_s32(input_scale, left_shift_out_vec), output_multiplier_vec),
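
The quantized add and mul hunks above lean on gemmlowp-style fixed-point primitives. A scalar C++ reference for the two helpers, sketched from the well-known gemmlowp formulation (the NEON intrinsics apply the same math lane-wise; this is not code from the PR):

#include <cstdint>
#include <cstdio>

int32_t SaturatingRoundingDoublingHighMul(int32_t a, int32_t b) {
  // Rounded (a * b * 2) / 2^32; the single overflow case saturates.
  if (a == INT32_MIN && b == INT32_MIN) return INT32_MAX;
  int64_t ab = static_cast<int64_t>(a) * static_cast<int64_t>(b);
  int32_t nudge = ab >= 0 ? (1 << 30) : (1 - (1 << 30));
  return static_cast<int32_t>((ab + nudge) / (1ll << 31));
}

int32_t RoundingDivideByPOT(int32_t x, int exponent) {
  // Divide by 2^exponent, rounding to nearest (ties away from zero).
  int32_t mask = (1 << exponent) - 1;
  int32_t remainder = x & mask;
  int32_t threshold = (mask >> 1) + (x < 0 ? 1 : 0);
  return (x >> exponent) + (remainder > threshold ? 1 : 0);
}

int main() {
  // Apply a real-valued multiplier of ~0.3 encoded as round(0.3 * 2^31), shift 0.
  int32_t qm = static_cast<int32_t>(0.3 * (1ll << 31));
  printf("%d\n", RoundingDivideByPOT(SaturatingRoundingDoublingHighMul(100, qm), 0));  // 30
  return 0;
}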

@ -19,7 +19,7 @@
#include "nnacl/errorcode.h"
int PadConstant4D(const int8_t *in_data, int8_t *out_data, const int32_t *in_dims, const int32_t *out_dims,
-const int32_t *paddings, const int tid, const int thread_num) {
+const int32_t *paddings, const int tid, const int thread_num) {
int32_t copy_size = in_dims[3];
for (int n = 0; n < in_dims[0]; n++) {
for (int h = tid; h < in_dims[1]; h += thread_num) {

@ -25,7 +25,7 @@
extern "C" {
#endif
int PadConstant4D(const int8_t *in_data, int8_t *out_data, const int32_t *in_dims, const int32_t *out_dims,
-const int32_t *paddings, const int tid, const int thread_num);
+const int32_t *paddings, const int tid, const int thread_num);
#ifdef __cplusplus
}
#endif

@ -99,9 +99,9 @@ int SliceInt8(const int8_t *input, int8_t *output, SliceParameter *param) {
multiplier = input_scale / output_scale;
}
-for (n = 0; n< param->size_[0]; ++n) {
+for (n = 0; n < param->size_[0]; ++n) {
size_t out_offset0 = n * out_stride0;
-size_t in_offset0 = (n+ param->begin_[0]) * in_stride0 + param->begin_[3];
+size_t in_offset0 = (n + param->begin_[0]) * in_stride0 + param->begin_[3];
for (h = 0; h < count_per_thread; ++h) {
size_t k = h + thread_stride;
if (k >= out_dim1) {
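
The offset lines above are plain row-major stride arithmetic. A tiny self-contained example of the same indexing, with made-up dimensions:

#include <cstdio>

int main() {
  // Slice a 2x4 region starting at (1, 2) from a 4x8 row-major buffer.
  const int in_dims[2] = {4, 8};
  const int begin[2] = {1, 2};
  const int size[2] = {2, 4};
  int in_stride0 = in_dims[1];  // elements per outer step
  for (int n = 0; n < size[0]; ++n) {
    int in_offset0 = (n + begin[0]) * in_stride0 + begin[1];
    printf("row %d starts at flat index %d\n", n, in_offset0);  // 10, then 18
  }
  return 0;
}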

@ -22,8 +22,8 @@
#ifdef __cplusplus
extern "C" {
#endif
-int SliceInt8NoParallel(const int8_t*input, int8_t *output, SliceParameter *param);
-int SliceInt8(const int8_t*input, int8_t *output, SliceParameter *param);
+int SliceInt8NoParallel(const int8_t *input, int8_t *output, SliceParameter *param);
+int SliceInt8(const int8_t *input, int8_t *output, SliceParameter *param);
#ifdef __cplusplus
}
#endif

@ -24,7 +24,7 @@
#ifdef ENABLE_NEON
int16x4_t DoClacSumHalfWord(int32x4_t scaled_input0, int32x4_t scaled_input1, int32x4_t left_shift_out_vec,
-int32x4_t output_multiplier_vec, SubQuantArg *para) {
+int32x4_t output_multiplier_vec, SubQuantArg *para) {
int32x4_t raw_data = vsubq_s32(scaled_input0, scaled_input1);
raw_data = RoundingDivideByPOTInt32x4(vqrdmulhq_s32(vmulq_s32(raw_data, left_shift_out_vec), output_multiplier_vec),

@ -28,7 +28,7 @@ const int iMantissaBits = 31;
void QuantizeMultiplierSmallerThanOne(double double_multiplier, int32_t *quantized_multiplier,
-int *right_shift) {
+int *right_shift) {
if (quantized_multiplier == NULL || right_shift == NULL) {
return;
}
@ -38,7 +38,7 @@ void QuantizeMultiplierSmallerThanOne(double double_multiplier, int32_t *quantiz
}
void QuantizeRoundParameter(double double_multiplier, int32_t *quantized_multiplier, int *left_shift,
-int *right_shift) {
+int *right_shift) {
int shift;
QuantizeMultiplierSmallerThanOne(double_multiplier, quantized_multiplier, &shift);
shift = -shift;
@ -56,7 +56,7 @@ uint8_t QuantizeToUint8(float real_value, float scale, int32_t zp) { return roun
int32_t QuantizeToInt8(float real_value, float scale, int32_t zp) { return round(real_value / scale + zp); }
void CalculateActivationRangeQuantized(bool is_relu, bool is_relu6, int32_t zp, float scale, int *mini,
-int *maxi) {
+int *maxi) {
int32_t min = CHAR_MIN;
int32_t max = CHAR_MAX;
int32_t quantized_zero = QuantizeToInt8(0, scale, zp);
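
QuantizeToInt8 above implements the affine mapping q = round(r / scale + zp). A quick round-trip check with illustrative numbers, not from the PR:

#include <cmath>
#include <cstdio>

int main() {
  float scale = 0.05f;
  int zp = -3;
  float real = 1.37f;
  int q = static_cast<int>(std::round(real / scale + zp));  // round(27.4 - 3) = 24
  float back = (q - zp) * scale;                            // (24 + 3) * 0.05 = 1.35
  printf("q=%d back=%.2f\n", q, back);  // error 0.02, within scale / 2
  return 0;
}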

@ -1364,7 +1364,6 @@ void Conv3x3Uint8OutputUnit(const int32_t *gemm_out, const int32_t *bias_data, i
}
}
}
} else {
for (int i = 0; i < C4NUM; i++) {
const int32_t *local_ptr = gemm_out + i;

@ -21,7 +21,6 @@
namespace mindspore {
namespace kernel {
/**
* MindSpore to OpenCL channel order.
* @param num_channels

@ -37,7 +37,6 @@ kernel::LiteKernel *GetOpenCLKernel(const std::vector<tensor::Tensor *> &in_tens
namespace mindspore {
namespace kernel {
std::vector<size_t> GetCommonGlobalSize(const std::vector<size_t> &local, const std::vector<size_t> &global) {
std::vector<size_t> result(3, 1);
for (int i = 0; i < 3; ++i) {

@ -31,6 +31,5 @@ AnfNodePopulater *AnfNodePopulaterRegistry::GetNodePopulater(const std::string &
void AnfNodePopulaterRegistry::SetNodePopulater(const std::string &name, AnfNodePopulater *populater) {
populaters[name] = populater;
}
} // namespace lite
} // namespace mindspore

@ -99,7 +99,6 @@ class OpGraphT : public OpGraph<OpNode> {
int AddEdge(NODE_ID srcId, NODE_ID dstId);
int AddEdge(const schema::CNodeT *srcNodeDef, const std::vector<std::unique_ptr<schema::CNodeT>> *nodeDefs);
};
} // namespace lite
} // namespace mindspore

@ -55,8 +55,8 @@ size_t GetRefCount(schema::MetaGraphT *graphT, uint32_t tensorIdx);
std::unique_ptr<schema::QuantParamT> CopyQuantParamT(const std::unique_ptr<schema::QuantParamT> &srcQuantParam);
-std::unique_ptr<schema::QuantParamT> \
-CopyQuantParamArrayT(const std::unique_ptr<schema::QuantParamT> &srcQuantParamArray);
+std::unique_ptr<schema::QuantParamT> CopyQuantParamArrayT(
+const std::unique_ptr<schema::QuantParamT> &srcQuantParamArray);
std::unique_ptr<schema::QuantParamT> GetInTensorQuantParamArray(const schema::MetaGraphT &graphT, size_t tensorIdx);

@ -20,7 +20,6 @@
namespace mindspore {
namespace lite {
STATUS AddConstFoldPass::Run(GraphNode *graphNode) { return ConstFoldPass::Run(graphNode); }
STATUS AddConstFoldPass::CreateOp(SubGraphDefT *subGraph, OpDefT *node) {

@ -19,7 +19,6 @@
namespace mindspore {
namespace lite {
STATUS ConcatV2ConstFoldPass::Run(GraphNode *graphNode) { return ConstFoldPass::Run(graphNode); }
STATUS ConcatV2ConstFoldPass::CreateOp(SubGraphDefT *subGraph, OpDefT *node) {

@ -20,7 +20,6 @@
namespace mindspore {
namespace lite {
STATUS RsqrtConstFoldPass::Run(GraphNode *graphNode) { return ConstFoldPass::Run(graphNode); }
STATUS RsqrtConstFoldPass::CreateOp(SubGraphDefT *subGraph, OpDefT *node) {

@ -23,7 +23,6 @@
namespace mindspore {
namespace lite {
STATUS SubConstFoldPass::Run(GraphNode *graphNode) { return ConstFoldPass::Run(graphNode); }
STATUS SubConstFoldPass::CreateOp(SubGraphDefT *subGraph, OpDefT *node) {

@ -20,7 +20,6 @@
namespace mindspore {
namespace lite {
STATUS TransposeConstFoldPass::Run(GraphNode *graphNode) { return ConstFoldPass::Run(graphNode); }
STATUS TransposeConstFoldPass::CreateOp(SubGraphDefT *subGraph, OpDefT *node) {

@ -187,5 +187,3 @@ STATUS FormatTransFusionPass::DoFusion(schema::MetaGraphT *graph, const std::str
}
} // namespace lite
} // namespace mindspore

@ -24,7 +24,6 @@
namespace mindspore {
namespace lite {
class EltwiseFormatTransPass : public FormatTransPass {
public:
EltwiseFormatTransPass() : FormatTransPass() {}

@ -200,6 +200,5 @@ NodeIter FormatTransPass::InsertFormatTransNode(schema::MetaGraphT *graph, NodeI
void FormatTransPass::SetQuantType(QuantType quantType) { this->quantType = quantType; }
void FormatTransPass::SetFmk(converter::FmkType fmkType) { this->fmkType = fmkType; }
} // namespace lite
} // namespace mindspore

@ -28,7 +28,6 @@ STATUS CaffeConcatParser::Parse(const caffe::LayerParameter &proto,
op->name = proto.name();
std::unique_ptr<schema::ConcatT> attr(new schema::ConcatT());
const caffe::ConcatParameter concatParam = proto.concat_param();
if (concatParam.has_axis() && concatParam.has_concat_dim()) {
// MS_LOGE("Concat param in caffe have concat_dim and axis simultaneously,return fail");
return RET_ERROR;
@ -37,7 +36,6 @@ STATUS CaffeConcatParser::Parse(const caffe::LayerParameter &proto,
if (concatParam.has_concat_dim()) {
// MS_LOGD("Concat dim , set axis:%d", concatParam.concat_dim());
int32_t concat_dim_value = (int32_t)concatParam.concat_dim();
if (concat_dim_value < 0) {
// MS_LOGE("concat_dim value in model is smaller than 0:%d", concat_dim_value);
return RET_ERROR;

@ -32,7 +32,6 @@ STATUS CaffeCropParser::Parse(const caffe::LayerParameter &proto,
attr->offsets = offsets;
} else {
const caffe::CropParameter cropParam = proto.crop_param();
if (cropParam.has_axis()) {
if (cropParam.axis() == -1) {
// MS_LOGW("axis with -1 may lead to calculation errors when input less than 4 dims.");

@ -34,7 +34,6 @@ STATUS CaffeEltwiseParser::Parse(const caffe::LayerParameter &proto, const caffe
}
const caffe::EltwiseParameter eltwiseParam = proto.eltwise_param();
if (eltwiseParam.coeff_size() != 0 && eltwiseParam.coeff_size() != proto.bottom_size()) {
MS_LOG(ERROR) << "Coeff size(" << eltwiseParam.coeff_size()
<< ") check fail, Eltwise Layer takes one coefficient per bottom blob.";

@ -19,7 +19,7 @@
namespace mindspore {
namespace lite {
STATUS CaffeFlattenParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight,
-schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) {
+schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) {
if (op == nullptr) {
// MS_LOG(ERROR) << "null pointer dereferencing.";
return RET_NULL_PTR;

@ -23,7 +23,6 @@ STATUS CaffeInterpParser::Parse(const caffe::LayerParameter &proto, const caffe:
schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) {
std::unique_ptr<schema::ResizeT> attr(new schema::ResizeT());
const caffe::InterpParameter interpParam = proto.interp_param();
if (interpParam.has_height()) {
int64_t height = interpParam.height();
if (height < 0) {

@ -27,7 +27,6 @@
namespace mindspore {
namespace lite {
class CaffeNodeParser {
public:
explicit CaffeNodeParser(const std::string &nodeName) : name(nodeName) {}

@ -20,9 +20,9 @@
namespace mindspore {
namespace lite {
STATUS CaffePermuteParser::Parse(const caffe::LayerParameter &proto,
-const caffe::LayerParameter &weight,
-schema::CNodeT *op,
-std::vector<schema::TensorT *> *weightVec) {
+const caffe::LayerParameter &weight,
+schema::CNodeT *op,
+std::vector<schema::TensorT *> *weightVec) {
op->name = proto.name();
std::unique_ptr<schema::TransposeT> attr(new schema::TransposeT());
const caffe::PermuteParameter permuteParam = proto.permute_param();

@ -25,7 +25,6 @@ STATUS CaffePReluParser::Parse(const caffe::LayerParameter &proto,
std::vector<schema::TensorT *> *weightVec) {
std::unique_ptr<schema::CaffePReLUT> attr(new schema::CaffePReLUT());
const caffe::PReLUParameter pReluParam = proto.prelu_param();
if (pReluParam.has_channel_shared()) {
attr->channelShared = pReluParam.channel_shared();
} else {

@ -27,7 +27,6 @@ STATUS CaffeReshapeParser::Parse(const caffe::LayerParameter &proto,
attr->format = schema::Format_NCHW;
const caffe::ReshapeParameter reshapeParam = proto.reshape_param();
if (!reshapeParam.has_shape()) {
// MS_LOGE("Reshape has no shape info, ret fail");
return RET_ERROR;

@ -150,6 +150,5 @@ TfliteNodeRegister g_TfliteHardSwishParser("HardSwish", new TfliteHardSwishParse
TfliteNodeRegister g_tfliteLogisticParser("Logistic", new TfliteLogisticParser());
TfliteNodeRegister g_tflitePreluParser("Prelu", new TflitePreluParser());
TfliteNodeRegister g_TfliteLeakyReluParser("LeakyRelu", new TfliteLeakyReluParser());
} // namespace lite
} // namespace mindspore

@ -25,7 +25,6 @@
namespace mindspore {
namespace lite {
class TfliteActivationParser : public TfliteNodeParser {
public:
TfliteActivationParser() : TfliteNodeParser("node_name") {}
@ -89,7 +88,6 @@ class TfliteLeakyReluParser : public TfliteNodeParser {
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) override;
};
} // namespace lite
} // namespace mindspore

@ -311,7 +311,6 @@ TfliteNodeRegister g_tfliteGreaterEParser("Greater", new TfliteGreaterParser());
TfliteNodeRegister g_tfliteGreaterEqualParser("GreaterEqual", new TfliteGreaterEqualParser());
TfliteNodeRegister g_tfliteLessParser("Less", new TfliteLessParser());
TfliteNodeRegister g_tfliteLessEqualParser("LessEqual", new TfliteLessEqualParser());
} // namespace lite
} // namespace mindspore

@ -25,7 +25,6 @@
namespace mindspore {
namespace lite {
class TfliteDoubleInputOpParser : public TfliteNodeParser {
public:
TfliteDoubleInputOpParser() : TfliteNodeParser("node_name") {}
@ -206,7 +205,6 @@ class TfliteLessEqualParser : public TfliteCompareOpParser {
public:
TfliteLessEqualParser() : TfliteCompareOpParser() {}
};
} // namespace lite
} // namespace mindspore

@ -72,6 +72,5 @@ STATUS TfliteBatchToSpaceParser::Parse(const std::unique_ptr<tflite::OperatorT>
TfliteNodeRegister g_tfliteBatchToSpaceParser("BatchToSpace", new TfliteBatchToSpaceParser());
TfliteNodeRegister g_TfliteBatchToSpaceNDParser("BatchToSpaceND", new TfliteBatchToSpaceNDParser());
} // namespace lite
} // namespace mindspore

@ -42,7 +42,6 @@ class TfliteBatchToSpaceNDParser : public TfliteBatchToSpaceParser {
public:
TfliteBatchToSpaceNDParser() : TfliteBatchToSpaceParser() {}
};
} // namespace lite
} // namespace mindspore

@ -22,8 +22,6 @@
namespace mindspore {
namespace lite {
STATUS TfliteDepthwiseConv2DParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op,
const std::vector<std::unique_ptr<tflite::TensorT>> &tflite_tensors,
const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer,

@ -68,7 +68,7 @@ STATUS TfliteFullyConnectedParser::Parse(const std::unique_ptr<tflite::OperatorT
AddOpInput(op, tensors_id, tensors_format, tensors_id_map,
tflite_op->inputs[1], tensors_id->size(), tflite_tensors.size(), schema::Format_KHWC);
AddOpInput(op, tensors_id, tensors_format, tensors_id_map,
-tflite_op->inputs[2], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC);
+tflite_op->inputs[2], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC);
AddOpOutput(op, tensors_id, tensors_format, tensors_id_map,
tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC);
return RET_OK;

@ -71,6 +71,5 @@ STATUS TfliteLogicalParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfli
TfliteNodeRegister g_TfliteLogicalAndParser("LogicalAnd", new TfliteLogicalAndParser());
TfliteNodeRegister g_TfliteLogicalNotParser("LogicalNot", new TfliteLogicalNotParser());
TfliteNodeRegister g_TfliteLogicalOrParser("LogicalOr", new TfliteLogicalOrParser());
} // namespace lite
} // namespace mindspore

@ -25,7 +25,6 @@
namespace mindspore {
namespace lite {
class TfliteLogicalParser : public TfliteNodeParser {
public:
TfliteLogicalParser() : TfliteNodeParser("node_name") {}

@ -59,7 +59,7 @@ STATUS TflitePadParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_o
op->primitive->value.value = attr.release();
AddOpInput(op, tensors_id, tensors_format, tensors_id_map,
-tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC);
+tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC);
AddOpOutput(op, tensors_id, tensors_format, tensors_id_map,
tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC);
return RET_OK;

@ -96,6 +96,5 @@ TfliteNodeRegister g_TfliteReduceMaxParser("ReduceMax", new TfliteReduceMaxParse
TfliteNodeRegister g_TfliteReduceMinParser("ReduceMin", new TfliteReduceMinParser());
TfliteNodeRegister g_TfliteReduceProdParser("ReduceProd", new TfliteReduceProdParser());
TfliteNodeRegister g_TfliteReduceAnyParser("ReduceAny", new TfliteReduceAnyParser());
} // namespace lite
} // namespace mindspore

@ -67,7 +67,6 @@ class TfliteReduceAnyParser : public TfliteReduceParser {
public:
TfliteReduceAnyParser() : TfliteReduceParser() {}
};
} // namespace lite
} // namespace mindspore

@ -49,7 +49,7 @@ STATUS TfliteReshapeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfli
return RET_ERROR;
}
auto shape_tensor_index = tflite_op->inputs[1];
-const auto & shape_tensor = tflite_tensors[shape_tensor_index];
+const auto &shape_tensor = tflite_tensors[shape_tensor_index];
if (shape_tensor == nullptr) {
MS_LOG(ERROR) << "shape_tensor is null";
return RET_NULL_PTR;

@ -71,13 +71,13 @@ STATUS TfliteResizeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflit
attr->preserveAspectRatio = false;
auto tfliteResizeTensorIndex = tflite_op->inputs[1];
-const auto & shape_tensor = tflite_tensors[tfliteResizeTensorIndex];
+const auto &shape_tensor = tflite_tensors[tfliteResizeTensorIndex];
if (shape_tensor == nullptr) {
MS_LOG(ERROR) << "shape_tensor is null";
return RET_NULL_PTR;
}
auto resizeTensorBufferIndex = shape_tensor->buffer;
-const auto & buff = tflite_model_buffer.at(resizeTensorBufferIndex);
+const auto &buff = tflite_model_buffer.at(resizeTensorBufferIndex);
if (buff == nullptr) {
MS_LOG(ERROR) << "buff_data is null";
return RET_NULL_PTR;
@ -92,7 +92,7 @@ STATUS TfliteResizeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflit
op->primitive->value.value = attr.release();
AddOpInput(op, tensors_id, tensors_format, tensors_id_map,
-tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC);
+tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC);
AddOpOutput(op, tensors_id, tensors_format, tensors_id_map,
tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC);
return RET_OK;

@ -47,7 +47,6 @@ class TfliteResizeNearestNeighborParser : public TfliteResizeParser {
public:
TfliteResizeNearestNeighborParser() : TfliteResizeParser() {}
};
} // namespace lite
} // namespace mindspore

@ -54,7 +54,7 @@ STATUS TfliteScatterNdParser::Parse(const std::unique_ptr<tflite::OperatorT> &tf
// in tflite, kIndices = 0, kUpdates = 1, kShape = 2
// in mslite, kScatterShapeIndex = 0, kScatterIndicesIndex = 1, kScatterUpdateIndex = 2;
AddOpInput(op, tensors_id, tensors_format, tensors_id_map,
-tflite_op->inputs[2], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC);
+tflite_op->inputs[2], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC);
AddOpInput(op, tensors_id, tensors_format, tensors_id_map,
tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC);
AddOpInput(op, tensors_id, tensors_format, tensors_id_map,

@ -192,10 +192,10 @@ size_t GetDataTypeSize(const TypeId &data_type) {
}
STATUS getPaddingParam(const std::unique_ptr<tflite::TensorT> &tensor,
-schema::PadMode pad_mode,
-int strideH, int strideW,
-int windowH, int windowW,
-std::vector<int> *params) {
+schema::PadMode pad_mode,
+int strideH, int strideW,
+int windowH, int windowW,
+std::vector<int> *params) {
if (tensor == nullptr) {
MS_LOG(ERROR) << "the input tensor is null";
return RET_ERROR;
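
Only the signature of getPaddingParam is reformatted above, but a helper with this shape — deriving explicit pad values from a pad mode, strides, and window size — typically follows the usual SAME-padding arithmetic. A sketch under that assumption (illustrative, not the function body from this PR):

#include <algorithm>
#include <cstdio>

int main() {
  // SAME padding: out = ceil(in / stride);
  // pad_total = max(0, (out - 1) * stride + window - in), split begin/end.
  int in_h = 7, stride_h = 2, window_h = 3;
  int out_h = (in_h + stride_h - 1) / stride_h;                           // 4
  int pad_total = std::max(0, (out_h - 1) * stride_h + window_h - in_h);  // 2
  int pad_up = pad_total / 2, pad_down = pad_total - pad_up;              // 1, 1
  printf("out=%d pad=(%d,%d)\n", out_h, pad_up, pad_down);
  return 0;
}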
@ -239,6 +239,5 @@ void Split(const std::string &src_str, std::vector<std::string> *dst_str, const
dst_str->push_back(src_str.substr(p1));
}
}
} // namespace lite
} // namespace mindspore

@ -73,14 +73,13 @@ void BitPack::BitPacking(const std::vector<uint8_t>& originDataVec, std::vector<
}
size_t remainBitData = bitDataVec.size();
-if ( 8 > remainBitData && remainBitData > 0 ) {
-for ( int i = 0; i < 8 - remainBitData; i++ ) {
+if (8 > remainBitData && remainBitData > 0) {
+for (int i = 0; i < 8 - remainBitData; i++) {
bitDataVec.push(0);
}
PackFromOriginToUint8(bitDataVec, packedDataVec);
}
}
} // namespace lite
} // namespace mindspore
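
The BitPack hunk above tidies the branch that zero-pads leftover bits up to a byte boundary before packing. A minimal sketch of that idea; PackByte is a stand-in for the PackFromOriginToUint8 call, and the bit container is assumed to behave like a FIFO queue:

#include <cstdint>
#include <cstdio>
#include <queue>

uint8_t PackByte(std::queue<uint8_t> *bits) {
  // Consume eight bits, most significant first.
  uint8_t byte = 0;
  for (int i = 0; i < 8; ++i) {
    byte = static_cast<uint8_t>((byte << 1) | bits->front());
    bits->pop();
  }
  return byte;
}

int main() {
  std::queue<uint8_t> bits;
  for (uint8_t b : {1, 0, 1}) bits.push(b);  // three leftover bits
  size_t remain = bits.size();
  if (8 > remain && remain > 0) {
    for (size_t i = 0; i < 8 - remain; ++i) bits.push(0);  // zero-pad to a byte
  }
  printf("0x%02X\n", PackByte(&bits));  // 10100000 -> 0xA0
  return 0;
}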

@ -42,7 +42,6 @@ using std::vector;
namespace mindspore {
namespace lite {
namespace quant {
struct DivergInfo {
std::vector<float> histogram;
CNodePtr cnode;

@ -33,7 +33,6 @@
namespace mindspore {
namespace lite {
namespace quant {
static constexpr size_t UINT8_QUANTIZATION = 8;
/**
@ -124,7 +123,6 @@ STATUS QuantFilter(ParamValueLitePtr &weightPtr, QuantType quantType, int quant_
size_t bitNum = UINT8_QUANTIZATION, bool per_channel = false);
STATUS PostBitPack(float *weights, size_t shapeSize, size_t bitNum = UINT8_QUANTIZATION);
} // namespace quant
} // namespace lite
} // namespace mindspore

@ -26,7 +26,6 @@ using std::vector;
namespace mindspore {
namespace lite {
namespace quant {
WeightQuantizer::WeightQuantizer(FuncGraphPtr graph, const string &weightSize,
const std::string &convWeightChannelThreshold, const std::string &bitNum)
: Quantizer(graph) {

@ -284,7 +284,7 @@ void CheckLeastInputSize(const CNodePtr &node, const int size) {
}
ParameterPtr AddNewBiasNode(float *bias_data, const FuncGraphPtr &func_graph, int kernel_num,
-const ParamValueLitePtr &weight_tensor) {
+const ParamValueLitePtr &weight_tensor) {
auto bias_parameter = func_graph->add_parameter();
MS_ASSERT(bias_parameter != nullptr);
std::vector<int> shape = {kernel_num};

@ -48,7 +48,7 @@ void CheckIfNodeIsParam(const AnfNodePtr &node);
void CheckLeastInputSize(const CNodePtr &node, int size);
ParameterPtr AddNewBiasNode(float *bias_data, const FuncGraphPtr &func_graph, int kernel_num,
-const ParamValueLitePtr &weight_tensor);
+const ParamValueLitePtr &weight_tensor);
schema::PrimitiveType GetCNodeType(const BaseRef &node);

@ -27,8 +27,8 @@ class ConvActivationFusion : public PatternProcessPass {
public:
explicit ConvActivationFusion(bool multigraph = true, const std::string &name = "conv_activation_fusion",
schema::PrimitiveType primitive = schema::PrimitiveType_LeakyReLU,
-schema::ActivationType activation = schema::ActivationType_LEAKY_RELU) : primitive_type(
-primitive), activation_type(activation), PatternProcessPass(name, multigraph) {}
+schema::ActivationType activation = schema::ActivationType_LEAKY_RELU)
+: primitive_type(primitive), activation_type(activation), PatternProcessPass(name, multigraph) {}
~ConvActivationFusion() override = default;
const BaseRef DefinePattern() const override;
const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override;

@ -394,6 +394,5 @@ int RunTimeProfile(int argc, const char **argv) {
return RET_OK;
}
} // namespace lite
} // namespace mindspore

@ -34,7 +34,6 @@
namespace mindspore {
namespace lite {
class MS_API TimeProfileFlags : public virtual FlagParser {
public:
TimeProfileFlags() {