ReviewBot Check

gongdaguo 2020-08-18 13:28:39 +08:00
parent 21502810dd
commit d2f6f2802e
72 changed files with 39 additions and 98 deletions

View File

@@ -122,7 +122,6 @@ int DeConv2D::InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vecto
pad_d_ = GetPadDown();
pad_r_ = GetPadRight();
auto pad_mode = (schema::PadMode)GetPadMode();
if (pad_mode == schema::PadMode_CAFFE) {
output_h = (input_h - 1) * stride_h + ((kernel_h - 1) * dilate_h + 1) - pad_u_ - pad_d_;
output_w = (input_w - 1) * stride_w + ((kernel_w - 1) * dilate_w + 1) - pad_l_ - pad_r_;
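
For context, the two lines above are the standard transposed-convolution output-size rule. Below is a minimal standalone sketch of that rule under the same CAFFE-style padding convention; the function and parameter names are illustrative, not the MindSpore API:

```cpp
#include <cstdio>

// Transposed convolution: input positions land `stride` apart, the dilated
// kernel spans (kernel - 1) * dilation + 1 elements, and CAFFE pad mode
// trims pad_begin and pad_end from the two edges.
int DeconvOutputSize(int input, int kernel, int stride, int dilation,
                     int pad_begin, int pad_end) {
  return (input - 1) * stride + ((kernel - 1) * dilation + 1) - pad_begin - pad_end;
}

int main() {
  // A 4-element axis, 3-wide kernel, stride 2, no dilation or padding -> 9.
  printf("%d\n", DeconvOutputSize(4, 3, 2, 1, 0, 0));
  return 0;
}
```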

View File

@@ -58,5 +58,4 @@ int ScatterND::InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vect
return 0;
}
} // namespace mindspore

View File

@@ -20,7 +20,6 @@ namespace mindspore {
namespace {
constexpr int kShapeInputNum = 1;
constexpr int kShapeOutputNum = 1;
} // namespace
int Shape::InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) {
if (inputs_.size() != kShapeInputNum) {

View File

@@ -161,6 +161,5 @@ void CompareOutput(float *output_data, std::string file_path) {
// }
// return "/data/data/" + packageName + '/';
//}
} // namespace lite
} // namespace mindspore

View File

@@ -22,7 +22,6 @@
namespace mindspore {
namespace lite {
int CompareRelativeOutput(float *output_data, std::string file_path);
}
} // namespace mindspore
#endif // MINDSPORE_LITE_COMMON_FILE_UTILS_EXT_H_

View File

@@ -75,7 +75,6 @@ std::vector<size_t> GetGraphOutputNodes(const schema::MetaGraph *meta_graph) {
// std::unordered_set<NODE_ID> OpNode::GetAllInEdges() { return inEdges; }
//
// std::unordered_set<NODE_ID> OpNode::GetAllOutEdges() { return outEdges; }
} // namespace lite
} // namespace mindspore

View File

@@ -82,7 +82,6 @@ int OpGraph<NODE_T>::Build(const schema::MetaGraph *subGraphDef) {
return RET_ERROR;
}
auto opDefs = subGraphDef->nodes();
uint32_t opCount = opDefs->size();
@@ -242,7 +241,6 @@ OpGraph<NODE_T>::~OpGraph() {
}
nodes.clear();
}
} // namespace lite
} // namespace mindspore

View File

@@ -146,6 +146,5 @@ std::vector<AnfNodePtr> DeepUsedGraphSearch(const AnfNodePtr &root, const Includ
std::vector<AnfNodePtr> DeepLinkedGraphSearch(const AnfNodePtr &root, const IncludeFunc &include) {
return DeepLinkedGraphSearcher(include).Search(root);
}
} // namespace mindspore

View File

@@ -29,7 +29,6 @@
namespace mindspore {
namespace lite {
namespace tensor {
struct QuantArg {
double scale;
int32_t zeroPoint;

View File

@@ -362,5 +362,4 @@ session::LiteSession *session::LiteSession::CreateSession(lite::Context *context
}
return session;
}
} // namespace mindspore

View File

@@ -33,7 +33,4 @@ void MatrixMultiplyFp16(const float16_t *matrix_a, const float16_t *matrix_b, fl
}
}
}
} // namespace mindspore::kernel

View File

@@ -99,9 +99,9 @@ int SliceInt8(const int8_t *input, int8_t *output, SliceParameter *param) {
multiplier = input_scale / output_scale;
}
-for (n = 0; n< param->size_[0]; ++n) {
+for (n = 0; n < param->size_[0]; ++n) {
size_t out_offset0 = n * out_stride0;
-size_t in_offset0 = (n+ param->begin_[0]) * in_stride0 + param->begin_[3];
+size_t in_offset0 = (n + param->begin_[0]) * in_stride0 + param->begin_[3];
for (h = 0; h < count_per_thread; ++h) {
size_t k = h + thread_stride;
if (k >= out_dim1) {
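
The two corrected lines are the usual strided-slice offset arithmetic: output row n reads input row n + begin[0], plus the start offset of the innermost dimension. A self-contained sketch of that indexing on a flattened 2-D buffer (a simplification of the 4-D NHWC case above; names are illustrative):

```cpp
#include <cstdint>
#include <cstring>

// Copy a [size0 x size1] window starting at (begin0, begin1) out of a
// row-major int8 buffer whose rows are in_stride0 elements wide.
void Slice2DInt8(const int8_t *input, int8_t *output, size_t in_stride0,
                 size_t begin0, size_t begin1, size_t size0, size_t size1) {
  for (size_t n = 0; n < size0; ++n) {
    size_t out_offset0 = n * size1;  // output rows are packed densely
    size_t in_offset0 = (n + begin0) * in_stride0 + begin1;
    memcpy(output + out_offset0, input + in_offset0, size1);
  }
}
```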

View File

@@ -22,8 +22,8 @@
#ifdef __cplusplus
extern "C" {
#endif
-int SliceInt8NoParallel(const int8_t*input, int8_t *output, SliceParameter *param);
-int SliceInt8(const int8_t*input, int8_t *output, SliceParameter *param);
+int SliceInt8NoParallel(const int8_t *input, int8_t *output, SliceParameter *param);
+int SliceInt8(const int8_t *input, int8_t *output, SliceParameter *param);
#ifdef __cplusplus
}
#endif

View File

@@ -1364,7 +1364,6 @@ void Conv3x3Uint8OutputUnit(const int32_t *gemm_out, const int32_t *bias_data, i
}
}
}
} else {
for (int i = 0; i < C4NUM; i++) {
const int32_t *local_ptr = gemm_out + i;

View File

@@ -21,7 +21,6 @@
namespace mindspore {
namespace kernel {
/**
* MindSpore to OpenCL channel order.
* @param num_channels

View File

@@ -37,7 +37,6 @@ kernel::LiteKernel *GetOpenCLKernel(const std::vector<tensor::Tensor *> &in_tens
namespace mindspore {
namespace kernel {
std::vector<size_t> GetCommonGlobalSize(const std::vector<size_t> &local, const std::vector<size_t> &global) {
std::vector<size_t> result(3, 1);
for (int i = 0; i < 3; ++i) {

View File

@@ -31,6 +31,5 @@ AnfNodePopulater *AnfNodePopulaterRegistry::GetNodePopulater(const std::string &
void AnfNodePopulaterRegistry::SetNodePopulater(const std::string &name, AnfNodePopulater *populater) {
populaters[name] = populater;
}
} // namespace lite
} // namespace mindspore

View File

@@ -99,7 +99,6 @@ class OpGraphT : public OpGraph<OpNode> {
int AddEdge(NODE_ID srcId, NODE_ID dstId);
int AddEdge(const schema::CNodeT *srcNodeDef, const std::vector<std::unique_ptr<schema::CNodeT>> *nodeDefs);
};
} // namespace lite
} // namespace mindspore

View File

@@ -55,8 +55,8 @@ size_t GetRefCount(schema::MetaGraphT *graphT, uint32_t tensorIdx);
std::unique_ptr<schema::QuantParamT> CopyQuantParamT(const std::unique_ptr<schema::QuantParamT> &srcQuantParam);
-std::unique_ptr<schema::QuantParamT> \
-CopyQuantParamArrayT(const std::unique_ptr<schema::QuantParamT> &srcQuantParamArray);
+std::unique_ptr<schema::QuantParamT> CopyQuantParamArrayT(
+    const std::unique_ptr<schema::QuantParamT> &srcQuantParamArray);
std::unique_ptr<schema::QuantParamT> GetInTensorQuantParamArray(const schema::MetaGraphT &graphT, size_t tensorIdx);

View File

@@ -20,7 +20,6 @@
namespace mindspore {
namespace lite {
STATUS AddConstFoldPass::Run(GraphNode *graphNode) { return ConstFoldPass::Run(graphNode); }
STATUS AddConstFoldPass::CreateOp(SubGraphDefT *subGraph, OpDefT *node) {

View File

@@ -19,7 +19,6 @@
namespace mindspore {
namespace lite {
STATUS ConcatV2ConstFoldPass::Run(GraphNode *graphNode) { return ConstFoldPass::Run(graphNode); }
STATUS ConcatV2ConstFoldPass::CreateOp(SubGraphDefT *subGraph, OpDefT *node) {

View File

@@ -20,7 +20,6 @@
namespace mindspore {
namespace lite {
STATUS RsqrtConstFoldPass::Run(GraphNode *graphNode) { return ConstFoldPass::Run(graphNode); }
STATUS RsqrtConstFoldPass::CreateOp(SubGraphDefT *subGraph, OpDefT *node) {

View File

@@ -23,7 +23,6 @@
namespace mindspore {
namespace lite {
STATUS SubConstFoldPass::Run(GraphNode *graphNode) { return ConstFoldPass::Run(graphNode); }
STATUS SubConstFoldPass::CreateOp(SubGraphDefT *subGraph, OpDefT *node) {

View File

@@ -20,7 +20,6 @@
namespace mindspore {
namespace lite {
STATUS TransposeConstFoldPass::Run(GraphNode *graphNode) { return ConstFoldPass::Run(graphNode); }
STATUS TransposeConstFoldPass::CreateOp(SubGraphDefT *subGraph, OpDefT *node) {

View File

@@ -187,5 +187,3 @@ STATUS FormatTransFusionPass::DoFusion(schema::MetaGraphT *graph, const std::str
}
} // namespace lite
} // namespace mindspore

View File

@@ -24,7 +24,6 @@
namespace mindspore {
namespace lite {
class EltwiseFormatTransPass : public FormatTransPass {
public:
EltwiseFormatTransPass() : FormatTransPass() {}

View File

@@ -200,6 +200,5 @@ NodeIter FormatTransPass::InsertFormatTransNode(schema::MetaGraphT *graph, NodeI
void FormatTransPass::SetQuantType(QuantType quantType) { this->quantType = quantType; }
void FormatTransPass::SetFmk(converter::FmkType fmkType) { this->fmkType = fmkType; }
} // namespace lite
} // namespace mindspore

View File

@@ -28,7 +28,6 @@ STATUS CaffeConcatParser::Parse(const caffe::LayerParameter &proto,
op->name = proto.name();
std::unique_ptr<schema::ConcatT> attr(new schema::ConcatT());
const caffe::ConcatParameter concatParam = proto.concat_param();
if (concatParam.has_axis() && concatParam.has_concat_dim()) {
// MS_LOGE("Concat param in caffe have concat_dim and axis simultaneously,return fail");
return RET_ERROR;
@@ -37,7 +36,6 @@ STATUS CaffeConcatParser::Parse(const caffe::LayerParameter &proto,
if (concatParam.has_concat_dim()) {
// MS_LOGD("Concat dim , set axis:%d", concatParam.concat_dim());
int32_t concat_dim_value = (int32_t)concatParam.concat_dim();
if (concat_dim_value < 0) {
// MS_LOGE("concat_dim value in model is smaller than 0:%d", concat_dim_value);
return RET_ERROR;

View File

@@ -32,7 +32,6 @@ STATUS CaffeCropParser::Parse(const caffe::LayerParameter &proto,
attr->offsets = offsets;
} else {
const caffe::CropParameter cropParam = proto.crop_param();
if (cropParam.has_axis()) {
if (cropParam.axis() == -1) {
// MS_LOGW("axis with -1 may lead to calculation errors when input less than 4 dims.");

View File

@@ -34,7 +34,6 @@ STATUS CaffeEltwiseParser::Parse(const caffe::LayerParameter &proto, const caffe
}
const caffe::EltwiseParameter eltwiseParam = proto.eltwise_param();
if (eltwiseParam.coeff_size() != 0 && eltwiseParam.coeff_size() != proto.bottom_size()) {
MS_LOG(ERROR) << "Coeff size(" << eltwiseParam.coeff_size()
<< ") check fail, Eltwise Layer takes one coefficient per bottom blob.";

View File

@@ -23,7 +23,6 @@ STATUS CaffeInterpParser::Parse(const caffe::LayerParameter &proto, const caffe:
schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) {
std::unique_ptr<schema::ResizeT> attr(new schema::ResizeT());
const caffe::InterpParameter interpParam = proto.interp_param();
if (interpParam.has_height()) {
int64_t height = interpParam.height();
if (height < 0) {

View File

@@ -27,7 +27,6 @@
namespace mindspore {
namespace lite {
class CaffeNodeParser {
public:
explicit CaffeNodeParser(const std::string &nodeName) : name(nodeName) {}

View File

@@ -25,7 +25,6 @@ STATUS CaffePReluParser::Parse(const caffe::LayerParameter &proto,
std::vector<schema::TensorT *> *weightVec) {
std::unique_ptr<schema::CaffePReLUT> attr(new schema::CaffePReLUT());
const caffe::PReLUParameter pReluParam = proto.prelu_param();
if (pReluParam.has_channel_shared()) {
attr->channelShared = pReluParam.channel_shared();
} else {

View File

@@ -27,7 +27,6 @@ STATUS CaffeReshapeParser::Parse(const caffe::LayerParameter &proto,
attr->format = schema::Format_NCHW;
const caffe::ReshapeParameter reshapeParam = proto.reshape_param();
if (!reshapeParam.has_shape()) {
// MS_LOGE("Reshape has no shape info, ret fail");
return RET_ERROR;

View File

@@ -150,6 +150,5 @@ TfliteNodeRegister g_TfliteHardSwishParser("HardSwish", new TfliteHardSwishParse
TfliteNodeRegister g_tfliteLogisticParser("Logistic", new TfliteLogisticParser());
TfliteNodeRegister g_tflitePreluParser("Prelu", new TflitePreluParser());
TfliteNodeRegister g_TfliteLeakyReluParser("LeakyRelu", new TfliteLeakyReluParser());
} // namespace lite
} // namespace mindspore

View File

@@ -25,7 +25,6 @@
namespace mindspore {
namespace lite {
class TfliteActivationParser : public TfliteNodeParser {
public:
TfliteActivationParser() : TfliteNodeParser("node_name") {}
@@ -89,7 +88,6 @@ class TfliteLeakyReluParser : public TfliteNodeParser {
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) override;
};
} // namespace lite
} // namespace mindspore

View File

@@ -311,7 +311,6 @@ TfliteNodeRegister g_tfliteGreaterEParser("Greater", new TfliteGreaterParser());
TfliteNodeRegister g_tfliteGreaterEqualParser("GreaterEqual", new TfliteGreaterEqualParser());
TfliteNodeRegister g_tfliteLessParser("Less", new TfliteLessParser());
TfliteNodeRegister g_tfliteLessEqualParser("LessEqual", new TfliteLessEqualParser());
} // namespace lite
} // namespace mindspore

View File

@@ -25,7 +25,6 @@
namespace mindspore {
namespace lite {
class TfliteDoubleInputOpParser : public TfliteNodeParser {
public:
TfliteDoubleInputOpParser() : TfliteNodeParser("node_name") {}
@@ -206,7 +205,6 @@ class TfliteLessEqualParser : public TfliteCompareOpParser {
public:
TfliteLessEqualParser() : TfliteCompareOpParser() {}
};
} // namespace lite
} // namespace mindspore

View File

@@ -72,6 +72,5 @@ STATUS TfliteBatchToSpaceParser::Parse(const std::unique_ptr<tflite::OperatorT>
TfliteNodeRegister g_tfliteBatchToSpaceParser("BatchToSpace", new TfliteBatchToSpaceParser());
TfliteNodeRegister g_TfliteBatchToSpaceNDParser("BatchToSpaceND", new TfliteBatchToSpaceNDParser());
} // namespace lite
} // namespace mindspore

View File

@@ -42,7 +42,6 @@ class TfliteBatchToSpaceNDParser : public TfliteBatchToSpaceParser {
public:
TfliteBatchToSpaceNDParser() : TfliteBatchToSpaceParser() {}
};
} // namespace lite
} // namespace mindspore

View File

@@ -22,8 +22,6 @@
namespace mindspore {
namespace lite {
STATUS TfliteDepthwiseConv2DParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op,
const std::vector<std::unique_ptr<tflite::TensorT>> &tflite_tensors,
const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer,

View File

@@ -71,6 +71,5 @@ STATUS TfliteLogicalParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfli
TfliteNodeRegister g_TfliteLogicalAndParser("LogicalAnd", new TfliteLogicalAndParser());
TfliteNodeRegister g_TfliteLogicalNotParser("LogicalNot", new TfliteLogicalNotParser());
TfliteNodeRegister g_TfliteLogicalOrParser("LogicalOr", new TfliteLogicalOrParser());
} // namespace lite
} // namespace mindspore

View File

@@ -25,7 +25,6 @@
namespace mindspore {
namespace lite {
class TfliteLogicalParser : public TfliteNodeParser {
public:
TfliteLogicalParser() : TfliteNodeParser("node_name") {}

View File

@@ -96,6 +96,5 @@ TfliteNodeRegister g_TfliteReduceMaxParser("ReduceMax", new TfliteReduceMaxParse
TfliteNodeRegister g_TfliteReduceMinParser("ReduceMin", new TfliteReduceMinParser());
TfliteNodeRegister g_TfliteReduceProdParser("ReduceProd", new TfliteReduceProdParser());
TfliteNodeRegister g_TfliteReduceAnyParser("ReduceAny", new TfliteReduceAnyParser());
} // namespace lite
} // namespace mindspore

View File

@@ -67,7 +67,6 @@ class TfliteReduceAnyParser : public TfliteReduceParser {
public:
TfliteReduceAnyParser() : TfliteReduceParser() {}
};
} // namespace lite
} // namespace mindspore

View File

@@ -49,7 +49,7 @@ STATUS TfliteReshapeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfli
return RET_ERROR;
}
auto shape_tensor_index = tflite_op->inputs[1];
-const auto & shape_tensor = tflite_tensors[shape_tensor_index];
+const auto &shape_tensor = tflite_tensors[shape_tensor_index];
if (shape_tensor == nullptr) {
MS_LOG(ERROR) << "shape_tensor is null";
return RET_NULL_PTR;

View File

@@ -71,13 +71,13 @@ STATUS TfliteResizeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflit
attr->preserveAspectRatio = false;
auto tfliteResizeTensorIndex = tflite_op->inputs[1];
-const auto & shape_tensor = tflite_tensors[tfliteResizeTensorIndex];
+const auto &shape_tensor = tflite_tensors[tfliteResizeTensorIndex];
if (shape_tensor == nullptr) {
MS_LOG(ERROR) << "shape_tensor is null";
return RET_NULL_PTR;
}
auto resizeTensorBufferIndex = shape_tensor->buffer;
-const auto & buff = tflite_model_buffer.at(resizeTensorBufferIndex);
+const auto &buff = tflite_model_buffer.at(resizeTensorBufferIndex);
if (buff == nullptr) {
MS_LOG(ERROR) << "buff_data is null";
return RET_NULL_PTR;

View File

@@ -47,7 +47,6 @@ class TfliteResizeNearestNeighborParser : public TfliteResizeParser {
public:
TfliteResizeNearestNeighborParser() : TfliteResizeParser() {}
};
} // namespace lite
} // namespace mindspore

View File

@@ -239,6 +239,5 @@ void Split(const std::string &src_str, std::vector<std::string> *dst_str, const
dst_str->push_back(src_str.substr(p1));
}
}
} // namespace lite
} // namespace mindspore

View File

@@ -73,14 +73,13 @@ void BitPack::BitPacking(const std::vector<uint8_t>& originDataVec, std::vector<
}
size_t remainBitData = bitDataVec.size();
-if ( 8 > remainBitData && remainBitData > 0 ) {
-for ( int i = 0; i < 8 - remainBitData; i++ ) {
+if (8 > remainBitData && remainBitData > 0) {
+for (int i = 0; i < 8 - remainBitData; i++) {
bitDataVec.push(0);
}
PackFromOriginToUint8(bitDataVec, packedDataVec);
}
}
} // namespace lite
} // namespace mindspore
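
The reformatted block is the tail of a bit-packing loop: when fewer than 8 bits remain in the queue, it zero-pads to a byte boundary before emitting the final byte. A hedged sketch of that flush step; the container and helper names are assumptions, not the actual BitPack interface:

```cpp
#include <cstdint>
#include <queue>
#include <vector>

// Pad a leftover bit queue with zeros and emit it as one final byte.
void FlushRemainingBits(std::queue<bool> *bits, std::vector<uint8_t> *packed) {
  size_t remain = bits->size();
  if (8 > remain && remain > 0) {
    for (size_t i = 0; i < 8 - remain; ++i) {
      bits->push(false);  // zero-pad up to a full byte
    }
    uint8_t byte = 0;
    for (int i = 0; i < 8; ++i) {  // pack MSB-first, mirroring push order
      byte = static_cast<uint8_t>((byte << 1) | (bits->front() ? 1 : 0));
      bits->pop();
    }
    packed->push_back(byte);
  }
}
```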

View File

@@ -42,7 +42,6 @@ using std::vector;
namespace mindspore {
namespace lite {
namespace quant {
struct DivergInfo {
std::vector<float> histogram;
CNodePtr cnode;

View File

@@ -33,7 +33,6 @@
namespace mindspore {
namespace lite {
namespace quant {
static constexpr size_t UINT8_QUANTIZATION = 8;
/**
@@ -124,7 +123,6 @@ STATUS QuantFilter(ParamValueLitePtr &weightPtr, QuantType quantType, int quant_
size_t bitNum = UINT8_QUANTIZATION, bool per_channel = false);
STATUS PostBitPack(float *weights, size_t shapeSize, size_t bitNum = UINT8_QUANTIZATION);
} // namespace quant
} // namespace lite
} // namespace mindspore

View File

@@ -26,7 +26,6 @@ using std::vector;
namespace mindspore {
namespace lite {
namespace quant {
WeightQuantizer::WeightQuantizer(FuncGraphPtr graph, const string &weightSize,
const std::string &convWeightChannelThreshold, const std::string &bitNum)
: Quantizer(graph) {

View File

@@ -27,8 +27,8 @@ class ConvActivationFusion : public PatternProcessPass {
public:
explicit ConvActivationFusion(bool multigraph = true, const std::string &name = "conv_activation_fusion",
schema::PrimitiveType primitive = schema::PrimitiveType_LeakyReLU,
-schema::ActivationType activation = schema::ActivationType_LEAKY_RELU) : primitive_type(
-primitive), activation_type(activation), PatternProcessPass(name, multigraph) {}
+schema::ActivationType activation = schema::ActivationType_LEAKY_RELU)
+    : primitive_type(primitive), activation_type(activation), PatternProcessPass(name, multigraph) {}
~ConvActivationFusion() override = default;
const BaseRef DefinePattern() const override;
const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override;
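
The change above only moves the constructor's initializer list onto its own line; behavior is unchanged, because C++ always initializes the base class first and then the members in declaration order, regardless of how the list is written. A small illustration (not from the patch):

```cpp
#include <iostream>

struct Base {
  Base() { std::cout << "Base\n"; }
};

struct Derived : Base {
  int a;
  int b;
  // Listed out of order on purpose: initialization still runs as
  // Base, then a, then b (compilers flag the mismatch with -Wreorder).
  Derived() : b(2), a(1) { std::cout << a << " " << b << "\n"; }
};

int main() { Derived d; }  // prints "Base" then "1 2"
```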

View File

@@ -394,6 +394,5 @@ int RunTimeProfile(int argc, const char **argv) {
return RET_OK;
}
} // namespace lite
} // namespace mindspore

View File

@@ -34,7 +34,6 @@
namespace mindspore {
namespace lite {
class MS_API TimeProfileFlags : public virtual FlagParser {
public:
TimeProfileFlags() {