forked from mindspore-Ecosystem/mindspore
!4747 modify caffe & tflite parsers format
Merge pull request !4747 from lyvette/tflite_parser
This commit is contained in: commit 20e80c56b3
@@ -23,11 +23,25 @@ STATUS CaffeArgMaxParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
op->name = proto.name();
std::unique_ptr<schema::ArgMaxT> attr = std::make_unique<schema::ArgMaxT>();
const caffe::ArgMaxParameter argmaxParam = proto.argmax_param();
MS_LOG(DEBUG) << "parse CaffeArgMaxParser";
if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
}
op->primitive = std::make_unique<schema::PrimitiveT>();
if (op->primitive == nullptr) {
MS_LOG(ERROR) << "op->primitive is null";
return RET_NULL_PTR;
}

int32_t axisType = 0;
std::unique_ptr<schema::ArgMaxT> attr = std::make_unique<schema::ArgMaxT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

const caffe::ArgMaxParameter argmaxParam = proto.argmax_param();
int32_t axisType;
int32_t axis = 0;
if (!argmaxParam.has_axis()) {
axisType = 2;

@@ -35,20 +49,19 @@ STATUS CaffeArgMaxParser::Parse(const caffe::LayerParameter &proto,
axisType = 1;
axis = (int64_t)argmaxParam.axis();
if (axis == -1) {
// MS_LOGE("axis with -1 may lead to calculation errors when input less than 4 dims.");
MS_LOG(ERROR) << "axis with -1 may lead to calculation errors when input less than 4 dims.";
return RET_ERROR;
}
}

attr->axis = axis;
attr->axisType = axisType;
attr->outMaxValue = argmaxParam.out_max_val();
attr->topK = argmaxParam.top_k();
attr->keepDims = true;

op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.value = attr.release();
op->name = proto.name();
op->primitive->value.type = schema::PrimitiveType_ArgMax;
op->primitive->value.value = attr.release();
return RET_OK;
}
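The hunk above is representative of the pattern this commit applies to every caffe node parser: validate the output node first, create op->primitive, create the op-specific attribute, fill it, then attach it and return a status code. Below is a minimal, self-contained sketch of that flow; Node, Primitive and ArgMaxAttr are simplified stand-ins invented for illustration, not the real schema::CNodeT / schema::PrimitiveT / schema::ArgMaxT types.

#include <cstdint>
#include <iostream>
#include <memory>
#include <string>

// Simplified stand-ins for schema::CNodeT / schema::PrimitiveT / schema::ArgMaxT.
enum Status { RET_OK = 0, RET_ERROR = 1, RET_NULL_PTR = 2 };

struct ArgMaxAttr {
  int32_t axis = 0;
  int32_t axisType = 0;
  bool outMaxValue = false;
  int32_t topK = 1;
  bool keepDims = true;
};

struct Primitive {
  std::unique_ptr<ArgMaxAttr> value;  // the real primitive stores a tagged union
};

struct Node {
  std::string name;
  std::unique_ptr<Primitive> primitive;
};

// Order enforced by the refactor: check op, create the primitive, create the
// attribute, fill it, then hand ownership over and return a status code.
Status ParseArgMax(const std::string &layerName, Node *op) {
  if (op == nullptr) {
    std::cerr << "op is null\n";
    return RET_NULL_PTR;
  }
  op->primitive = std::make_unique<Primitive>();
  auto attr = std::make_unique<ArgMaxAttr>();
  attr->axisType = 2;  // default when the model gives no axis, as in the parser
  op->name = layerName;
  op->primitive->value = std::move(attr);
  return RET_OK;
}

int main() {
  Node node;
  return ParseArgMax("argmax1", &node) == RET_OK ? 0 : 1;
}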
@@ -14,8 +14,8 @@
* limitations under the License.
*/

#ifndef MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_ARGMAX_PARSER_H_
#define MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_ARGMAX_PARSER_H_
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_ARGMAX_PARSER_H_
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_ARGMAX_PARSER_H_

#include <vector>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h"

@@ -27,11 +27,13 @@ class CaffeArgMaxParser : public CaffeNodeParser {
public:
CaffeArgMaxParser() : CaffeNodeParser("argmax") {}

STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
STATUS Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) override;
};
} // namespace lite
} // namespace mindspore

#endif // MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_ARGMAX_PARSER_H_
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_ARGMAX_PARSER_H_
@@ -14,9 +14,9 @@
* limitations under the License.
*/

#include "mindspore/lite/tools/converter/parser/caffe/caffe_batchnorm_parser.h"
#include <cmath>
#include <memory>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_batchnorm_parser.h"
#include "tools/common/tensor_util.h"

#define CAFFE_BATCH_NORM_ESP_DEFAULT_FLOAT 0.00001

@@ -28,13 +28,29 @@ static const int CAFFE_BATCHNORMAL_TOP_SIZE = 1;
namespace mindspore {
namespace lite {
using STATUS = int;
STATUS CaffeBatchNormParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight,
schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) {
op->name = proto.name();
// caffe batch norm attr
std::unique_ptr<schema::BatchNormT> attr = std::make_unique<schema::BatchNormT>();
const caffe::BatchNormParameter batchNormParam = proto.batch_norm_param();

STATUS CaffeBatchNormParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
MS_LOG(DEBUG) << "parse CaffeBatchNormParser";
if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
}
op->primitive = std::make_unique<schema::PrimitiveT>();
if (op->primitive == nullptr) {
MS_LOG(ERROR) << "op->primitive is null";
return RET_NULL_PTR;
}

std::unique_ptr<schema::BatchNormT> attr = std::make_unique<schema::BatchNormT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

const caffe::BatchNormParameter batchNormParam = proto.batch_norm_param();
// check bottom size
if (proto.bottom_size() != CAFFE_BATCHNORMAL_BOTTOM_SIZE) {
MS_LOG(ERROR) << "Layer " << proto.name().c_str() << "bottom numbers is error, it must be " \

@@ -50,7 +66,8 @@ STATUS CaffeBatchNormParser::Parse(const caffe::LayerParameter &proto, const caf
}

if (batchNormParam.has_eps()) {
if (fabs(CAFFE_BATCH_NORM_ESP_DEFAULT_FLOAT - batchNormParam.eps()) < CAFFE_BATCH_NORM_ESP_DEFAULT_DIFF_FLOAT) {
if (fabs(CAFFE_BATCH_NORM_ESP_DEFAULT_FLOAT - batchNormParam.eps())
< CAFFE_BATCH_NORM_ESP_DEFAULT_DIFF_FLOAT) {
attr->epsilon = CAFFE_BATCH_NORM_ESP_DEFAULT_FLOAT;
} else {
auto tmpAuto = batchNormParam.eps();

@@ -67,7 +84,7 @@ STATUS CaffeBatchNormParser::Parse(const caffe::LayerParameter &proto, const caf
// parse weight gamma
auto gamma = ConvertWeight(weight.blobs(0));
if (gamma == nullptr) {
// MS_LOGE("Convert blobs(0) for layer %s failed", weight.name().c_str());
MS_LOG(ERROR) << "Convert blobs(0) for layer " << weight.name().c_str() << " failed";
return RET_ERROR;
}

@@ -82,7 +99,7 @@ STATUS CaffeBatchNormParser::Parse(const caffe::LayerParameter &proto, const caf
// parse weight beta
auto beta = ConvertWeight(weight.blobs(1));
if (beta == nullptr) {
// MS_LOGE("Convert blobs(1) for layer %s failed", weight.name().c_str());
MS_LOG(ERROR) << "Convert blobs(1) for layer " << weight.name().c_str() << " failed";
return RET_ERROR;
}

@@ -94,10 +111,9 @@ STATUS CaffeBatchNormParser::Parse(const caffe::LayerParameter &proto, const caf
estimatedVariance = nullptr;
weightVec->push_back(beta);

op->primitive = std::make_unique<schema::PrimitiveT>();
op->name = proto.name();
op->primitive->value.type = schema::PrimitiveType_BatchNorm;
op->primitive->value.value = attr.release();

return RET_OK;
}
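The eps handling reformatted above keeps the model's epsilon unless it is within a small tolerance of the default, in which case the parser falls back to the default constant. A self-contained sketch of that logic follows; the tolerance value is an assumption for illustration, since CAFFE_BATCH_NORM_ESP_DEFAULT_DIFF_FLOAT is not shown in this diff.

#include <cmath>
#include <iostream>

// Stand-in constants; CAFFE_BATCH_NORM_ESP_DEFAULT_FLOAT is 0.00001 in the
// parser, the comparison tolerance below is an assumed value for illustration.
constexpr double kDefaultEps = 0.00001;
constexpr double kDefaultEpsDiff = 1e-9;

// Mirrors the eps handling in CaffeBatchNormParser::Parse: an eps close enough
// to the default collapses to the default, otherwise the model's value is kept.
double ResolveEpsilon(bool hasEps, double eps) {
  if (!hasEps) {
    return kDefaultEps;
  }
  if (std::fabs(kDefaultEps - eps) < kDefaultEpsDiff) {
    return kDefaultEps;
  }
  return eps;
}

int main() {
  std::cout << ResolveEpsilon(true, 0.001) << "\n";
  return 0;
}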
@@ -14,8 +14,8 @@
* limitations under the License.
*/

#ifndef MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_BATCHNORM_PARSER_H_
#define MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_BATCHNORM_PARSER_H_
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_BATCHNORM_PARSER_H_
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_BATCHNORM_PARSER_H_

#include <vector>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h"

@@ -27,11 +27,13 @@ class CaffeBatchNormParser : public CaffeNodeParser {
public:
CaffeBatchNormParser() : CaffeNodeParser("batchnorm") {}

STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
STATUS Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) override;
};
} // namespace lite
} // namespace mindspore

#endif // MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_BATCHNORM_PARSER_H_
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_BATCHNORM_PARSER_H_
@@ -14,8 +14,8 @@
* limitations under the License.
*/

#include <memory>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_concat_parser.h"
#include <memory>

const int32_t CONCAT_DEFAULT_AXIS = 1;

@@ -25,33 +25,48 @@ STATUS CaffeConcatParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
op->name = proto.name();
MS_LOG(DEBUG) << "parse CaffeConcatParser";
if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
}
op->primitive = std::make_unique<schema::PrimitiveT>();
if (op->primitive == nullptr) {
MS_LOG(ERROR) << "op->primitive is null";
return RET_NULL_PTR;
}

std::unique_ptr<schema::ConcatT> attr = std::make_unique<schema::ConcatT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

const caffe::ConcatParameter concatParam = proto.concat_param();
if (concatParam.has_axis() && concatParam.has_concat_dim()) {
// MS_LOGE("Concat param in caffe have concat_dim and axis simultaneously,return fail");
MS_LOG(ERROR) << "Concat param in caffe have concat_dim and axis simultaneously, return fail";
return RET_ERROR;
}

if (concatParam.has_concat_dim()) {
// MS_LOGD("Concat dim , set axis:%d", concatParam.concat_dim());
MS_LOG(DEBUG) << "Concat dim , set axis: " << concatParam.concat_dim();
int32_t concat_dim_value = (int32_t)concatParam.concat_dim();
if (concat_dim_value < 0) {
// MS_LOGE("concat_dim value in model is smaller than 0:%d", concat_dim_value);
MS_LOG(ERROR) << "concat_dim value in model is smaller than 0:" << concat_dim_value;
return RET_ERROR;
}
attr->axis = concat_dim_value;
} else if (concatParam.has_axis()) {
// MS_LOGD("axis , set axis:%d", concatParam.axis());
MS_LOG(DEBUG) << "axis , set axis: " << concatParam.axis();
int32_t tmpInt = (int32_t)concatParam.axis();
attr->axis = tmpInt;
} else {
// MS_LOGD("default , set axis:%d", CONCAT_DEFAULT_AXIS);
MS_LOG(DEBUG) << "default , set axis: " << CONCAT_DEFAULT_AXIS;
attr->axis = CONCAT_DEFAULT_AXIS;
}

attr->n = proto.bottom_size();
op->primitive = std::make_unique<schema::PrimitiveT>();

op->name = proto.name();
op->primitive->value.type = schema::PrimitiveType_Concat;
op->primitive->value.value = attr.release();
return RET_OK;
@@ -14,8 +14,8 @@
* limitations under the License.
*/

#ifndef MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_CONCAT_PARSER_H_
#define MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_CONCAT_PARSER_H_
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_CONCAT_PARSER_H_
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_CONCAT_PARSER_H_

#include <vector>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h"

@@ -27,11 +27,13 @@ class CaffeConcatParser : public CaffeNodeParser {
public:
CaffeConcatParser() : CaffeNodeParser("concat") {}

STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
STATUS Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) override;
};
} // namespace lite
} // namespace mindspore

#endif // MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_CONCAT_PARSER_H_
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_CONCAT_PARSER_H_
@@ -26,7 +26,8 @@ static const int CAFFE_CONV_BIAS_DIM_NUM = 1;

namespace mindspore {
namespace lite {
STATUS CaffeConvBaseParser::ParsePads(const caffe::ConvolutionParameter &convParam, std::vector<int64_t> *pad) {
STATUS CaffeConvBaseParser::ParsePads(const caffe::ConvolutionParameter &convParam,
std::vector<int64_t> *pad) {
/**
* padUp = padH;
* padDown = padH;

@@ -35,7 +36,7 @@ STATUS CaffeConvBaseParser::ParsePads(const caffe::ConvolutionParameter &convPar
*/
if (convParam.has_pad_h() || convParam.has_pad_w()) {
if (convParam.pad_size() != 0) {
MS_LOG(ERROR) << "Either pad or pad_h/w should be specified; not both";
MS_LOG(ERROR) << "Either pad or pad_h/w should be specified; not both.";
return RET_ERROR;
}

@@ -73,7 +74,8 @@ STATUS CaffeConvBaseParser::ParsePads(const caffe::ConvolutionParameter &convPar
return RET_OK;
}

STATUS CaffeConvBaseParser::ParseStrides(const caffe::ConvolutionParameter &convParam, std::vector<int64_t> *stride) {
STATUS CaffeConvBaseParser::ParseStrides(const caffe::ConvolutionParameter &convParam,
std::vector<int64_t> *stride) {
if (convParam.has_stride_h() || convParam.has_stride_w()) {
if (convParam.stride_size() != 0) {
MS_LOG(ERROR) << "Either stride or stride_h/w should be specified; not both";

@@ -117,7 +119,8 @@ STATUS CaffeConvBaseParser::ParseDilations(const caffe::ConvolutionParameter &co
return RET_OK;
}

STATUS CaffeConvBaseParser::ParseKernels(const caffe::ConvolutionParameter &convParam, std::vector<int64_t> *kernel) {
STATUS CaffeConvBaseParser::ParseKernels(const caffe::ConvolutionParameter &convParam,
std::vector<int64_t> *kernel) {
if (convParam.has_kernel_h() || convParam.has_kernel_w()) {
if (convParam.kernel_size_size() != 0) {
MS_LOG(ERROR) << "Either kernel_size or kernel_h/w should be specified; not both.";

@@ -146,7 +149,8 @@ STATUS CaffeConvBaseParser::ParseKernels(const caffe::ConvolutionParameter &conv
return RET_OK;
}

int CaffeConvBaseParser::ParseGroup(const caffe::ConvolutionParameter &convParam, const std::string &layerType) {
int CaffeConvBaseParser::ParseGroup(const caffe::ConvolutionParameter &convParam,
const std::string &layerType) {
// group default 1
int group = 0;
if (convParam.has_group()) {
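ParsePads, ParseStrides and ParseKernels all enforce the same rule visible in the hunks above: the combined field and the _h/_w pair are mutually exclusive. The sketch below is self-contained and only illustrative; ConvParam is a stand-in for caffe::ConvolutionParameter, and the expansion of the result into four pad values is an assumption for illustration, since only the mutual-exclusion error path is visible in this diff.

#include <cstdint>
#include <iostream>
#include <vector>

// Simplified stand-in for the fields CaffeConvBaseParser::ParsePads reads from
// caffe::ConvolutionParameter; the real code uses the protobuf accessors.
struct ConvParam {
  bool has_pad_h = false;
  bool has_pad_w = false;
  uint32_t pad_h = 0;
  uint32_t pad_w = 0;
  std::vector<uint32_t> pad;  // the repeated 'pad' field
};

// Mirrors the rule the diff keeps: 'pad' and 'pad_h/pad_w' are mutually
// exclusive; the result is expanded to {up, down, left, right}.
int ParsePads(const ConvParam &p, std::vector<int64_t> *out) {
  if (p.has_pad_h || p.has_pad_w) {
    if (!p.pad.empty()) {
      std::cerr << "Either pad or pad_h/w should be specified; not both.\n";
      return 1;
    }
    *out = {p.pad_h, p.pad_h, p.pad_w, p.pad_w};
    return 0;
  }
  uint32_t v = p.pad.empty() ? 0 : p.pad[0];  // a single value pads all sides
  *out = {v, v, v, v};
  return 0;
}

int main() {
  ConvParam p;
  p.has_pad_h = true;
  p.pad_h = 2;
  std::vector<int64_t> pads;
  return ParsePads(p, &pads);
}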
@@ -14,8 +14,8 @@
* limitations under the License.
*/

#ifndef MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_CONV_BASE_PARSER_H_
#define MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_CONV_BASE_PARSER_H_
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_CONV_BASE_PARSER_H_
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_CONV_BASE_PARSER_H_

#include <string>
#include <vector>

@@ -30,22 +30,28 @@ class CaffeConvBaseParser {

virtual ~CaffeConvBaseParser() {}

STATUS ParsePads(const caffe::ConvolutionParameter &conv_param, std::vector<int64_t> *pad);
STATUS ParsePads(const caffe::ConvolutionParameter &conv_param,
std::vector<int64_t> *pad);

STATUS ParseStrides(const caffe::ConvolutionParameter &conv_param, std::vector<int64_t> *stride);
STATUS ParseStrides(const caffe::ConvolutionParameter &conv_param,
std::vector<int64_t> *stride);

STATUS ParseDilations(const caffe::ConvolutionParameter &conv_param, std::vector<int64_t> *dilation);
STATUS ParseDilations(const caffe::ConvolutionParameter &conv_param,
std::vector<int64_t> *dilation);

STATUS ParseKernels(const caffe::ConvolutionParameter &conv_param, std::vector<int64_t> *kernel);
STATUS ParseKernels(const caffe::ConvolutionParameter &conv_param,
std::vector<int64_t> *kernel);

int ParseGroup(const caffe::ConvolutionParameter &convParam, const std::string &layerType);
int ParseGroup(const caffe::ConvolutionParameter &convParam,
const std::string &layerType);

int ParseChannelOut(const caffe::ConvolutionParameter &convParam, int32_t *channelOut);

STATUS ParseWeight(const caffe::LayerParameter &weight, std::vector<schema::TensorT *> *weightVec);
STATUS ParseWeight(const caffe::LayerParameter &weight,
std::vector<schema::TensorT *> *weightVec);
};
} // namespace lite
} // namespace mindspore

#endif // MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_CONV_BASE_PARSER_H_
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_CONV_BASE_PARSER_H_
@@ -14,8 +14,8 @@
* limitations under the License.
*/

#ifndef MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_CONVERTER_H_
#define MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_CONVERTER_H_
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_CONVERTER_H_
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_CONVERTER_H_

#include <string>
#include <memory>

@@ -32,5 +32,5 @@ class CaffeConverter : public Converter {
};
} // namespace mindspore::lite

#endif // MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_CONVERTER_H_
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_CONVERTER_H_
@@ -14,21 +14,23 @@
* limitations under the License.
*/

#include <memory>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_convolution_parser.h"
#include "utils/log_adapter.h"
#include <memory>

namespace mindspore {
namespace lite {
void CaffeConvolutionParser::ParseGroupConvolution(schema::CNodeT *op, schema::Conv2DT *attr) {
if (attr == nullptr || attr->group == 1) {
return;
STATUS CaffeConvolutionParser::ParseGroupConvolution(schema::CNodeT *op,
schema::Conv2DT *attr) {
if (attr->group == 1) {
return RET_OK;
}
std::unique_ptr<schema::DepthwiseConv2DT> depthwiseConv2DParam = std::make_unique<schema::DepthwiseConv2DT>();
std::unique_ptr<schema::DepthwiseConv2DT> depthwiseConv2DParam
= std::make_unique<schema::DepthwiseConv2DT>();
if (depthwiseConv2DParam == nullptr) {
MS_LOG(ERROR) << "new DepthwiseConv2DT failed";
return;
MS_LOG(ERROR) << "new op failed";
return RET_ERROR;
}

depthwiseConv2DParam->format = attr->format;
depthwiseConv2DParam->channelIn = attr->channelIn;
depthwiseConv2DParam->channelMultiplier = attr->channelOut / attr->channelIn;

@@ -48,19 +50,30 @@ void CaffeConvolutionParser::ParseGroupConvolution(schema::CNodeT *op, schema::C
delete attr;
op->primitive->value.type = schema::PrimitiveType_DepthwiseConv2D;
op->primitive->value.value = depthwiseConv2DParam.release();
return RET_OK;
}

STATUS CaffeConvolutionParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight,
schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) {
op->name = proto.name();
std::unique_ptr<schema::Conv2DT> attr(new (std::nothrow) schema::Conv2DT());
if (attr == nullptr) {
MS_LOG(ERROR) << "new Conv2DT failed";
return RET_ERROR;
}
attr->format = schema::Format_NCHW;
const caffe::ConvolutionParameter convParam = proto.convolution_param();
STATUS CaffeConvolutionParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
MS_LOG(DEBUG) << "parse CaffeConvolutionParser";

if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
}
op->primitive = std::make_unique<schema::PrimitiveT>();
if (op->primitive == nullptr) {
MS_LOG(ERROR) << "op->primitive is null";
return RET_NULL_PTR;
}

std::unique_ptr<schema::Conv2DT> attr(new (std::nothrow) schema::Conv2DT());

attr->format = schema::Format_NCHW;

const caffe::ConvolutionParameter convParam = proto.convolution_param();
CaffeConvBaseParser convParser;
// parse pad
std::vector<int64_t> pad(4, 0);

@@ -119,14 +132,21 @@ STATUS CaffeConvolutionParser::Parse(const caffe::LayerParameter &proto, const c
attr->channelIn = weightBlob.channels() * attr->group;
}
attr->padMode = schema::PadMode_CAFFE;
op->primitive = std::make_unique<schema::PrimitiveT>();

op->name = proto.name();
op->primitive->value.type = schema::PrimitiveType_Conv2D;
op->primitive->value.value = attr.get();

ParseGroupConvolution(op, attr.release());
status = ParseGroupConvolution(op, attr.release());
if (status != RET_OK) {
MS_LOG(ERROR) << "Parse group convolution failed";
return RET_ERROR;
}

status = convParser.ParseWeight(weight, weightVec);
if (status != RET_OK) {
MS_LOG(ERROR) << "ParseWeight for " << proto.name().c_str() << " failed";
return RET_ERROR;
}

return status;
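In this file ParseGroupConvolution changes from void to STATUS, so the caller can now fail when the depthwise rewrite cannot be built. The following is a sketch of the new call shape under simplified stand-in types (the real ones are schema::CNodeT, schema::Conv2DT and schema::DepthwiseConv2DT); it is illustrative only, and the ownership handling here is a simplification of what the parser does with op->primitive.

#include <iostream>
#include <memory>

enum Status { RET_OK = 0, RET_ERROR = 1 };

// Stand-ins for schema::CNodeT, schema::Conv2DT and schema::DepthwiseConv2DT.
struct Conv2DAttr {
  int group = 1;
  int channelIn = 1;
  int channelOut = 1;
};
struct DepthwiseAttr {
  int channelIn = 1;
  int channelMultiplier = 1;
};
struct Node {
  Conv2DAttr *conv = nullptr;                  // set by the caller first
  std::unique_ptr<DepthwiseAttr> depthwise;
};

// The commit turns this helper from 'void' into a status-returning function,
// so an allocation failure is reported instead of silently returning.
Status ParseGroupConvolution(Node *op, Conv2DAttr *attr) {
  if (attr->group == 1) {
    return RET_OK;                             // keep the plain Conv2D attribute
  }
  auto dw = std::make_unique<DepthwiseAttr>();
  if (dw == nullptr) {                         // mirrors the diff's check; make_unique would throw instead
    std::cerr << "new op failed\n";
    return RET_ERROR;
  }
  dw->channelIn = attr->channelIn;
  dw->channelMultiplier = attr->channelOut / attr->channelIn;
  delete attr;                                 // the Conv2D attribute is replaced
  op->conv = nullptr;
  op->depthwise = std::move(dw);
  return RET_OK;
}

int main() {
  Node node;
  auto attr = std::make_unique<Conv2DAttr>();
  attr->group = 2;
  attr->channelIn = 4;
  attr->channelOut = 8;
  node.conv = attr.get();
  Status s = ParseGroupConvolution(&node, attr.release());  // the caller now checks the status
  return s == RET_OK ? 0 : 1;
}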
@@ -14,8 +14,8 @@
* limitations under the License.
*/

#ifndef MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_CONVOLUTION_PARSER_H_
#define MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_CONVOLUTION_PARSER_H_
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_CONVOLUTION_PARSER_H_
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_CONVOLUTION_PARSER_H_

#include <vector>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h"

@@ -28,14 +28,17 @@ class CaffeConvolutionParser : public CaffeNodeParser {
public:
CaffeConvolutionParser() : CaffeNodeParser("convolution") {}

STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
STATUS Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) override;

private:
void ParseGroupConvolution(schema::CNodeT *op, schema::Conv2DT *attr);
STATUS ParseGroupConvolution(schema::CNodeT *op,
schema::Conv2DT *attr);
};
} // namespace lite
} // namespace mindspore

#endif // MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_CONVOLUTION_PARSER_H_
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_CONVOLUTION_PARSER_H_
@@ -14,8 +14,8 @@
* limitations under the License.
*/

#include <memory>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_crop_parser.h"
#include <memory>

const int32_t CROP_AXIS = 2;

@@ -25,7 +25,23 @@ STATUS CaffeCropParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
MS_LOG(DEBUG) << "parse CaffeCropParser";
if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
}
op->primitive = std::make_unique<schema::PrimitiveT>();
if (op->primitive == nullptr) {
MS_LOG(ERROR) << "op->primitive is null";
return RET_NULL_PTR;
}

std::unique_ptr<schema::CropT> attr = std::make_unique<schema::CropT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

if (!proto.has_crop_param()) {
attr->axis = CROP_AXIS;
std::vector<int64_t> offsets(2, 0);

@@ -34,7 +50,7 @@ STATUS CaffeCropParser::Parse(const caffe::LayerParameter &proto,
const caffe::CropParameter cropParam = proto.crop_param();
if (cropParam.has_axis()) {
if (cropParam.axis() == -1) {
// MS_LOGW("axis with -1 may lead to calculation errors when input less than 4 dims.");
MS_LOG(WARNING) << "axis with -1 may lead to calculation errors when input less than 4 dims.";
}
attr->axis = cropParam.axis();
} else {

@@ -49,9 +65,10 @@ STATUS CaffeCropParser::Parse(const caffe::LayerParameter &proto,
attr->offsets = offsets;
}
}
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.value = attr.release();

op->name = proto.name();
op->primitive->value.type = schema::PrimitiveType_Crop;
op->primitive->value.value = attr.release();
return RET_OK;
}
@@ -14,8 +14,8 @@
* limitations under the License.
*/

#ifndef MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_CROP_PARSER_H_
#define MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_CROP_PARSER_H_
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_CROP_PARSER_H_
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_CROP_PARSER_H_

#include <vector>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h"

@@ -27,11 +27,13 @@ class CaffeCropParser : public CaffeNodeParser {
public:
CaffeCropParser() : CaffeNodeParser("crop") {}

STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
STATUS Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) override;
};
} // namespace lite
} // namespace mindspore

#endif // MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_CROP_PARSER_H_
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_CROP_PARSER_H_
@@ -14,21 +14,22 @@
* limitations under the License.
*/

#include <memory>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_deconvolution_parser.h"
#include <memory>

namespace mindspore {
namespace lite {
void CaffeDeconvolutionParser::ParseGroupDeconvolution(schema::CNodeT *op, schema::DeConv2DT *attr) {
if (attr == nullptr || attr->group == 1) {
return;
STATUS CaffeDeconvolutionParser::ParseGroupDeconvolution(schema::CNodeT *op,
schema::DeConv2DT *attr) {
if (attr->group == 1) {
return RET_OK;
}

std::unique_ptr<schema::DeDepthwiseConv2DT> deDepthwiseConv2DParam
= std::make_unique<schema::DeDepthwiseConv2DT>();
if (deDepthwiseConv2DParam == nullptr) {
MS_LOG(ERROR) << "new DeDepthwiseConv2DT failed";
return;
MS_LOG(ERROR) << "new op failed";
return RET_ERROR;
}
deDepthwiseConv2DParam->format = attr->format;
deDepthwiseConv2DParam->channelIn = attr->channelOut;

@@ -49,14 +50,30 @@ void CaffeDeconvolutionParser::ParseGroupDeconvolution(schema::CNodeT *op, schem
delete attr;
op->primitive->value.type = schema::PrimitiveType_DeDepthwiseConv2D;
op->primitive->value.value = deDepthwiseConv2DParam.release();
return RET_OK;
}
STATUS CaffeDeconvolutionParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight,
schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) {
op->name = proto.name();
auto *attr = new schema::DeConv2DT();
attr->format = schema::Format_NCHW;
const caffe::ConvolutionParameter convParam = proto.convolution_param();

STATUS CaffeDeconvolutionParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
MS_LOG(DEBUG) << "parse CaffeDeconvolutionParser";

if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
}
op->primitive = std::make_unique<schema::PrimitiveT>();
if (op->primitive == nullptr) {
MS_LOG(ERROR) << "op->primitive is null";
return RET_NULL_PTR;
}

std::unique_ptr<schema::DeConv2DT> attr(new (std::nothrow) schema::DeConv2DT());

attr->format = schema::Format_NCHW;

const caffe::ConvolutionParameter convParam = proto.convolution_param();
CaffeConvBaseParser convParser;
// parse pad
std::vector<int64_t> pad(4, 0);

@@ -118,13 +135,21 @@ STATUS CaffeDeconvolutionParser::Parse(const caffe::LayerParameter &proto, const
attr->channelIn = weightBlob.num() * attr->group;
}
attr->padMode = schema::PadMode_CAFFE;
op->primitive = std::make_unique<schema::PrimitiveT>();

op->name = proto.name();
op->primitive->value.type = schema::PrimitiveType_DeConv2D;
op->primitive->value.value = attr;
ParseGroupDeconvolution(op, attr);
op->primitive->value.value = attr.get();

status = ParseGroupDeconvolution(op, attr.release());
if (status != RET_OK) {
MS_LOG(ERROR) << "Parse group deconvolution failed";
return RET_ERROR;
}

status = convParser.ParseWeight(weight, weightVec);
if (status != RET_OK) {
MS_LOG(ERROR) << "ParseWeight for " << proto.name().c_str() << " failed";
return RET_ERROR;
}

return status;
@@ -14,8 +14,8 @@
* limitations under the License.
*/

#ifndef MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_DECONVOLUTION_PARSER_H_
#define MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_DECONVOLUTION_PARSER_H_
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_DECONVOLUTION_PARSER_H_
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_DECONVOLUTION_PARSER_H_

#include <vector>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h"

@@ -28,14 +28,17 @@ class CaffeDeconvolutionParser : public CaffeNodeParser {
public:
CaffeDeconvolutionParser() : CaffeNodeParser("deconvolution") {}

STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
STATUS Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) override;

private:
void ParseGroupDeconvolution(schema::CNodeT *op, schema::DeConv2DT *attr);
STATUS ParseGroupDeconvolution(schema::CNodeT *op,
schema::DeConv2DT *attr);
};
} // namespace lite
} // namespace mindspore

#endif // MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_DECONVOLUTION_PARSER_H_
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_DECONVOLUTION_PARSER_H_
@@ -14,19 +14,36 @@
* limitations under the License.
*/

#include "mindspore/lite/tools/converter/parser/caffe/caffe_eltwise_parser.h"
#include <cmath>
#include <memory>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_eltwise_parser.h"
#include "utils/log_adapter.h"

const int ELTWISE_MIN_INPUT_SIZE = 2;
const float ELTWISE_SUM_COEFF_EPSILON = 1e-5;

namespace mindspore {
namespace lite {
STATUS CaffeEltwiseParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight,
schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) {
STATUS CaffeEltwiseParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
MS_LOG(DEBUG) << "parse CaffeEltwiseParser";
if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
}
op->primitive = std::make_unique<schema::PrimitiveT>();
if (op->primitive == nullptr) {
MS_LOG(ERROR) << "op->primitive is null";
return RET_NULL_PTR;
}

std::unique_ptr<schema::EltwiseT> attr = std::make_unique<schema::EltwiseT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

if (proto.bottom_size() < ELTWISE_MIN_INPUT_SIZE) {
MS_LOG(ERROR) << "Eltwise Op " << proto.name() << " need at least 2 inputs,but input size is "
<< proto.bottom_size();

@@ -37,7 +54,7 @@ STATUS CaffeEltwiseParser::Parse(const caffe::LayerParameter &proto, const caffe
if (eltwiseParam.coeff_size() != 0 && eltwiseParam.coeff_size() != proto.bottom_size()) {
MS_LOG(ERROR) << "Coeff size(" << eltwiseParam.coeff_size()
<< ") check fail, Eltwise Layer takes one coefficient per bottom blob.";
return RET_PARAM_INVALID;
return RET_ERROR;
}

if (eltwiseParam.operation() == caffe::EltwiseParameter::PROD && eltwiseParam.coeff_size() != 0) {

@@ -64,12 +81,13 @@ STATUS CaffeEltwiseParser::Parse(const caffe::LayerParameter &proto, const caffe
break;
default:
MS_LOG(ERROR) << "Eltwise parse params fail, unsupported opration: " << eltwiseParam.operation();
return RET_PARAM_INVALID;
return RET_ERROR;
}
} else {
attr->mode = schema::EltwiseMode_SUM;
}
op->primitive = std::make_unique<schema::PrimitiveT>();

op->name = proto.name();
op->primitive->value.type = schema::PrimitiveType_Eltwise;
op->primitive->value.value = attr.release();
return RET_OK;
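The eltwise parser maps caffe::EltwiseParameter operations onto schema eltwise modes and rejects anything else; the diff also normalizes the failure code from RET_PARAM_INVALID to RET_ERROR. Below is a self-contained sketch of that mapping with stand-in enums invented for illustration; they are not the real caffe or schema enumerations.

#include <iostream>

// Stand-ins for caffe::EltwiseParameter::EltwiseOp and schema::EltwiseMode.
enum class CaffeEltwiseOp { PROD, SUM, MAX };
enum class EltwiseMode { PROD, SUM, MAXIMUM };

enum Status { RET_OK = 0, RET_ERROR = 1 };

// Mirrors the switch in CaffeEltwiseParser: known operations are mapped,
// anything else is reported and rejected with RET_ERROR.
Status MapEltwiseMode(CaffeEltwiseOp op, EltwiseMode *mode) {
  switch (op) {
    case CaffeEltwiseOp::PROD: *mode = EltwiseMode::PROD; break;
    case CaffeEltwiseOp::SUM:  *mode = EltwiseMode::SUM;  break;
    case CaffeEltwiseOp::MAX:  *mode = EltwiseMode::MAXIMUM; break;
    default:
      std::cerr << "unsupported operation\n";
      return RET_ERROR;
  }
  return RET_OK;
}

int main() {
  EltwiseMode mode;
  return MapEltwiseMode(CaffeEltwiseOp::SUM, &mode);
}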
@@ -14,8 +14,8 @@
* limitations under the License.
*/

#ifndef MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_ELTWISE_PARSER_H_
#define MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_ELTWISE_PARSER_H_
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_ELTWISE_PARSER_H_
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_ELTWISE_PARSER_H_

#include <vector>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h"

@@ -27,11 +27,13 @@ class CaffeEltwiseParser : public CaffeNodeParser {
public:
CaffeEltwiseParser() : CaffeNodeParser("eltwise") {}

STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
STATUS Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) override;
};
} // namespace lite
} // namespace mindspore

#endif // MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_ELTWISE_PARSER_H_
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_ELTWISE_PARSER_H_
@@ -13,20 +13,34 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <memory>

#include "mindspore/lite/tools/converter/parser/caffe/caffe_flatten_parser.h"
#include <memory>

namespace mindspore {
namespace lite {
STATUS CaffeFlattenParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight,
schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) {
STATUS CaffeFlattenParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
MS_LOG(DEBUG) << "parse CaffeFlattenParser";
if (op == nullptr) {
// MS_LOG(ERROR) << "null pointer dereferencing.";
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
}
std::unique_ptr<schema::FlattenT> attr = std::make_unique<schema::FlattenT>();

op->primitive = std::make_unique<schema::PrimitiveT>();
if (op->primitive == nullptr) {
MS_LOG(ERROR) << "op->primitive is null";
return RET_NULL_PTR;
}

std::unique_ptr<schema::FlattenT> attr = std::make_unique<schema::FlattenT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

op->name = proto.name();
op->primitive->value.type = schema::PrimitiveType_Flatten;
op->primitive->value.value = attr.release();
return RET_OK;
@@ -14,8 +14,8 @@
* limitations under the License.
*/

#ifndef PREDICT_CONVERTER_PARSER_CAFFE_CAFFE_FLATTEN_PARSER_H_
#define PREDICT_CONVERTER_PARSER_CAFFE_CAFFE_FLATTEN_PARSER_H_
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_FLATTEN_PARSER_H_
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_FLATTEN_PARSER_H_

#include <vector>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h"

@@ -27,10 +27,12 @@ class CaffeFlattenParser : public CaffeNodeParser {
public:
CaffeFlattenParser() : CaffeNodeParser("flatten") {}

STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
STATUS Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) override;
};
} // namespace lite
} // namespace mindspore

#endif // PREDICT_CONVERTER_PARSER_CAFFE_CAFFE_FLATTEN_PARSER_H_
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_FLATTEN_PARSER_H_
@@ -14,18 +14,35 @@
* limitations under the License.
*/

#include <memory>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_innerproduct_parser.h"
#include <memory>

namespace mindspore {
namespace lite {
STATUS CaffeInnerProductParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight,
schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) {
const caffe::InnerProductParameter innerProductParam = proto.inner_product_param();
std::unique_ptr<schema::FullConnectionT> attr = std::make_unique<schema::FullConnectionT>();
STATUS CaffeInnerProductParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
MS_LOG(DEBUG) << "parse CaffeInnerProductParser";
if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
}
op->primitive = std::make_unique<schema::PrimitiveT>();
if (op->primitive == nullptr) {
MS_LOG(ERROR) << "op->primitive is null";
return RET_NULL_PTR;
}

std::unique_ptr<schema::FullConnectionT> attr = std::make_unique<schema::FullConnectionT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

const caffe::InnerProductParameter innerProductParam = proto.inner_product_param();
if (!innerProductParam.has_num_output()) {
// MS_LOGE("InnerProduct Parse num_output for %s failed.", proto.name().c_str());
MS_LOG(ERROR) << "InnerProduct Parse num_output for " << proto.name().c_str() << " failed.";
return RET_ERROR;
}

@@ -33,7 +50,7 @@ STATUS CaffeInnerProductParser::Parse(const caffe::LayerParameter &proto, const
attr->axis = 1;
attr->useAxis = true;
} else {
// MS_LOG(ERROR) << "InnerProduct Parse axis only support default 1, but actually " << innerProductParam.axis();
MS_LOG(ERROR) << "InnerProduct Parse axis only support default 1, but actually " << innerProductParam.axis();
return RET_ERROR;
}

@@ -44,14 +61,14 @@ STATUS CaffeInnerProductParser::Parse(const caffe::LayerParameter &proto, const

// parse weight
if (weight.blobs_size() == 0) {
// MS_LOGE("InnerProduct No filter data in layer %s", weight.name().c_str());
MS_LOG(ERROR) << "InnerProduct No filter data in layer " << weight.name().c_str();
return RET_ERROR;
}

// parse filter
auto filter = ConvertWeight(weight.blobs(0));
if (filter == nullptr) {
// MS_LOGE("InnerProduct parse weight for layer %s failed", weight.name().c_str());
MS_LOG(ERROR) << "InnerProduct parse weight for layer " << weight.name().c_str() << " failed";
return RET_ERROR;
}
weightVec->push_back(filter);

@@ -60,14 +77,15 @@ STATUS CaffeInnerProductParser::Parse(const caffe::LayerParameter &proto, const
if (innerProductParam.bias_term() && weight.blobs_size() > 1) {
auto bias = ConvertWeight(weight.blobs(1));
if (bias == nullptr) {
// MS_LOGE("InnerProduct parse bias for layer %s failed", weight.name().c_str());
MS_LOG(ERROR) << "InnerProduct parse bias for layer " << weight.name().c_str() << " failed";
return RET_ERROR;
}
weightVec->push_back(bias);
}
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.value = attr.release();

op->name = proto.name();
op->primitive->value.type = schema::PrimitiveType_FullConnection;
op->primitive->value.value = attr.release();
return RET_OK;
}
@@ -14,8 +14,8 @@
* limitations under the License.
*/

#ifndef MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_INNERPRODUCT_PARSER_H_
#define MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_INNERPRODUCT_PARSER_H_
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_INNERPRODUCT_PARSER_H_
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_INNERPRODUCT_PARSER_H_

#include <vector>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h"

@@ -27,11 +27,13 @@ class CaffeInnerProductParser : public CaffeNodeParser {
public:
CaffeInnerProductParser() : CaffeNodeParser("innerproduct") {}

STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
STATUS Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) override;
};
} // namespace lite
} // namespace mindspore

#endif // MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_INNERPRODUCT_PARSER_H_
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_INNERPRODUCT_PARSER_H_
@@ -32,6 +32,7 @@ STATUS CaffeInspector::InspectModel(const caffe::NetParameter &proto) {
SetTopsAndBottoms();

FindInputAndOutput();

return RET_OK;
}
@@ -14,8 +14,8 @@
* limitations under the License.
*/

#ifndef MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_INSPECTOR_H_
#define MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_INSPECTOR_H_
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_INSPECTOR_H_
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_INSPECTOR_H_

#include <set>
#include <string>

@@ -52,5 +52,5 @@ using CaffeInspectorPtr = std::shared_ptr<CaffeInspector>;
} // namespace lite
} // namespace mindspore

#endif // MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_INSPECTOR_H_
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_INSPECTOR_H_
@@ -14,19 +14,37 @@
* limitations under the License.
*/

#include <memory>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_interp_parser.h"
#include <memory>

namespace mindspore {
namespace lite {
STATUS CaffeInterpParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight,
schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) {
STATUS CaffeInterpParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
MS_LOG(DEBUG) << "parse CaffeInterpParser";
if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
}
op->primitive = std::make_unique<schema::PrimitiveT>();
if (op->primitive == nullptr) {
MS_LOG(ERROR) << "op->primitive is null";
return RET_NULL_PTR;
}

std::unique_ptr<schema::ResizeT> attr = std::make_unique<schema::ResizeT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

const caffe::InterpParameter interpParam = proto.interp_param();
if (interpParam.has_height()) {
int64_t height = interpParam.height();
if (height < 0) {
// MS_LOGE("Interp height must be > 0");
MS_LOG(ERROR) << "Interp height must be > 0";
return RET_ERROR;
}
attr->newHeight = height;

@@ -35,17 +53,15 @@ STATUS CaffeInterpParser::Parse(const caffe::LayerParameter &proto, const caffe:
if (interpParam.has_width()) {
int64_t width = interpParam.width();
if (width < 0) {
// MS_LOGE("Interp width must be > 0");
MS_LOG(ERROR) << "Interp width must be > 0";
return RET_ERROR;
}
attr->newWidth = width;
}

attr->alignCorners = true;
attr->method = schema::ResizeMethod_BILINEAR;

op->name = proto.name();
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.type = schema::PrimitiveType_Resize;
op->primitive->value.value = attr.release();
return RET_OK;
@@ -14,8 +14,8 @@
* limitations under the License.
*/

#ifndef MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_INTERP_PARSER_H_
#define MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_INTERP_PARSER_H_
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_INTERP_PARSER_H_
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_INTERP_PARSER_H_

#include <vector>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h"

@@ -27,11 +27,13 @@ class CaffeInterpParser : public CaffeNodeParser {
public:
CaffeInterpParser() : CaffeNodeParser("Interp") {}

STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
STATUS Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) override;
};
} // namespace lite
} // namespace mindspore

#endif // MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_INTERP_PARSER_H_
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_INTERP_PARSER_H_
@ -31,10 +31,9 @@ CaffeModelParser::~CaffeModelParser() {}
|
|||
|
||||
const std::set<std::string> CaffeModelParser::skipedLayerType = {"Dropout"};
|
||||
|
||||
schema::MetaGraphT *CaffeModelParser::Parse(const std::string &modelFile, const std::string &weightFile,
|
||||
schema::MetaGraphT *CaffeModelParser::Parse(const std::string &modelFile,
|
||||
const std::string &weightFile,
|
||||
const QuantType &quantType) {
|
||||
// std::unique_ptr<schema::MetaGraphT> graph = std::make_unique<schema::MetaGraphT>();
|
||||
|
||||
if (ValidateFileStr(modelFile, ".prototxt") != RET_OK) {
|
||||
MS_LOG(ERROR) << "INPUT ILLEGAL: modelFile must be *.prototxt";
|
||||
return nullptr;
|
||||
|
@ -78,38 +77,35 @@ schema::MetaGraphT *CaffeModelParser::Parse(const std::string &modelFile, const
|
|||
return nullptr;
|
||||
}
|
||||
|
||||
// set inputTensor index and outputTensor index for the whole graph
|
||||
status = SetGraphTensorIndex(proto, &tensorCache, subGraphDef.get());
|
||||
if (status != RET_OK) {
|
||||
MS_LOG(ERROR) << "Set inputTensor index and outputTensor index for graph failed!";
|
||||
return nullptr;
|
||||
}
|
||||
subGraphDef->name = GetModelName(modelFile);
|
||||
// set all tensors to graph
|
||||
SetAllTensors(tensorCache, subGraphDef.get());
|
||||
// graph = move(subGraphDef);
|
||||
|
||||
// ConvertCaffeBatchNorm(graph.get());
|
||||
SetAllTensors(tensorCache, subGraphDef.get());
|
||||
|
||||
return subGraphDef.release();
|
||||
// return Fb2Anf(graph.release());
|
||||
}
|
||||
|
||||
STATUS CaffeModelParser::SetOpInputIdx(const caffe::LayerParameter &layer, schema::CNodeT *op,
|
||||
STATUS CaffeModelParser::SetOpInputIdx(const caffe::LayerParameter &layer,
|
||||
schema::CNodeT *op,
|
||||
TensorCache *tensorCache) {
|
||||
for (int i = 0; i < layer.bottom_size(); i++) {
|
||||
int index = tensorCache->FindTensor(layer.bottom(i));
|
||||
if (index >= 0) {
|
||||
op->inputIndex.emplace_back(index);
|
||||
} else {
|
||||
// MS_LOGE("Can't find input layer for %s.", layer.name().c_str());
|
||||
MS_LOG(ERROR) << "Can't find input layer for " << layer.name().c_str();
|
||||
return RET_ERROR;
|
||||
}
|
||||
}
|
||||
return RET_OK;
|
||||
}
|
||||
|
||||
STATUS CaffeModelParser::SetOpOutputIdx(const caffe::LayerParameter &layer, schema::CNodeT *op,
|
||||
STATUS CaffeModelParser::SetOpOutputIdx(const caffe::LayerParameter &layer,
|
||||
schema::CNodeT *op,
|
||||
TensorCache *tensorCache) {
|
||||
for (int i = 0; i < layer.top_size(); i++) {
|
||||
std::unique_ptr<schema::TensorT> msTensor = std::make_unique<schema::TensorT>();
|
||||
|
@ -118,7 +114,8 @@ STATUS CaffeModelParser::SetOpOutputIdx(const caffe::LayerParameter &layer, sche
|
|||
return RET_OK;
|
||||
}
|
||||
|
||||
STATUS CaffeModelParser::SetWeightTensor(const std::vector<schema::TensorT *> &weightVec, schema::CNodeT *op,
|
||||
STATUS CaffeModelParser::SetWeightTensor(const std::vector<schema::TensorT *> &weightVec,
|
||||
schema::CNodeT *op,
|
||||
TensorCache *tensorCache) {
|
||||
for (auto iter : weightVec) {
|
||||
op->inputIndex.emplace_back(tensorCache->AddTensor("Weight", iter, CONST));
|
||||
|
@ -126,7 +123,8 @@ STATUS CaffeModelParser::SetWeightTensor(const std::vector<schema::TensorT *> &w
|
|||
return RET_OK;
|
||||
}
|
||||
|
||||
STATUS CaffeModelParser::SetAllTensors(const TensorCache &tensorCache, schema::MetaGraphT *subGraphDef) {
|
||||
STATUS CaffeModelParser::SetAllTensors(const TensorCache &tensorCache,
|
||||
schema::MetaGraphT *subGraphDef) {
|
||||
std::vector<schema::TensorT *> tensors = tensorCache.GetCachedTensor();
|
||||
for (auto iter : tensors) {
|
||||
std::unique_ptr<schema::TensorT> temp(iter);
|
||||
|
@ -135,7 +133,8 @@ STATUS CaffeModelParser::SetAllTensors(const TensorCache &tensorCache, schema::M
|
|||
return RET_OK;
|
||||
}
|
||||
|
||||
STATUS CaffeModelParser::SetGraphTensorIndex(const caffe::NetParameter &proto, TensorCache *tensorCache,
|
||||
STATUS CaffeModelParser::SetGraphTensorIndex(const caffe::NetParameter &proto,
|
||||
TensorCache *tensorCache,
|
||||
schema::MetaGraphT *subGraphDef) {
|
||||
CaffeInspector caffeInspector;
|
||||
caffeInspector.InspectModel(proto);
|
||||
|
@ -144,7 +143,7 @@ STATUS CaffeModelParser::SetGraphTensorIndex(const caffe::NetParameter &proto, T
|
|||
if (index >= 0) {
|
||||
subGraphDef->inputIndex.emplace_back(index);
|
||||
} else {
|
||||
// MS_LOGE("Can't find input tensor layer for graph.");
|
||||
MS_LOG(ERROR) << "Can't find input tensor layer for graph.";
|
||||
return RET_ERROR;
|
||||
}
|
||||
}
|
||||
|
@ -154,15 +153,17 @@ STATUS CaffeModelParser::SetGraphTensorIndex(const caffe::NetParameter &proto, T
|
|||
if (index >= 0) {
|
||||
subGraphDef->outputIndex.emplace_back(index);
|
||||
} else {
|
||||
// MS_LOGE("Can't find output tensor layer for graph.");
|
||||
MS_LOG(ERROR) << "Can't find output tensor layer for graph.";
|
||||
return RET_ERROR;
|
||||
}
|
||||
}
|
||||
return RET_OK;
|
||||
}
|
||||
|
||||
STATUS CaffeModelParser::ParseLayer(const caffe::NetParameter &proto, const caffe::NetParameter &weight,
|
||||
TensorCache *tensorCache, schema::MetaGraphT *subGraphDef) {
|
||||
STATUS CaffeModelParser::ParseLayer(const caffe::NetParameter &proto,
|
||||
const caffe::NetParameter &weight,
|
||||
TensorCache *tensorCache,
|
||||
schema::MetaGraphT *subGraphDef) {
|
||||
for (int i = 0; i < proto.layer_size(); i++) {
|
||||
auto layer = proto.layer(i);
|
||||
|
||||
|
@ -192,11 +193,10 @@ STATUS CaffeModelParser::ParseLayer(const caffe::NetParameter &proto, const caff
|
|||
std::unique_ptr<schema::CNodeT> op = std::make_unique<schema::CNodeT>();
|
||||
op->name = layer.name();
|
||||
|
||||
// set op input index
|
||||
auto status = SetOpInputIdx(layer, op.get(), tensorCache);
|
||||
if (status != RET_OK) {
|
||||
MS_LOG(ERROR) << "Set Op " << layer.name() << " Input Index Failed!";
|
||||
return status;
|
||||
return RET_ERROR;
|
||||
}
|
||||
|
||||
auto nodeParser = CaffeNodeParserRegistry::GetInstance()->GetNodeParser(layer.type().c_str());
|
||||
|
@ -209,16 +209,15 @@ STATUS CaffeModelParser::ParseLayer(const caffe::NetParameter &proto, const caff
|
|||
status = nodeParser->Parse(layer, layerP, op.get(), &weightVec);
|
||||
if (status != RET_OK) {
|
||||
MS_LOG(ERROR) << "Parse weight for " << layer.name() << " Failed!";
|
||||
return status;
|
||||
return RET_ERROR;
|
||||
}
|
||||
// set op weight tensor to tensorcache
|
||||
|
||||
SetWeightTensor(weightVec, op.get(), tensorCache);
|
||||
|
||||
// set op output index
|
||||
status = SetOpOutputIdx(layer, op.get(), tensorCache);
|
||||
if (status != RET_OK) {
|
||||
MS_LOG(ERROR) << "Set Op " << layer.name() << " Output Index Failed!";
|
||||
return status;
|
||||
return RET_ERROR;
|
||||
}
|
||||
|
||||
// op->fmkType = FmkType_CAFFE;
|
||||
|
@ -228,7 +227,8 @@ STATUS CaffeModelParser::ParseLayer(const caffe::NetParameter &proto, const caff
|
|||
return RET_OK;
|
||||
}
|
||||
|
||||
STATUS CaffeModelParser::GetModelInput(const caffe::NetParameter &proto, TensorCache *tensorCache) {
|
||||
STATUS CaffeModelParser::GetModelInput(const caffe::NetParameter &proto,
|
||||
TensorCache *tensorCache) {
|
||||
for (int i = 0; i < proto.input_size(); i++) {
|
||||
if (proto.input_dim_size() <= 0) {
|
||||
continue;
|
||||
|
@ -254,51 +254,5 @@ STATUS CaffeModelParser::GetModelInput(const caffe::NetParameter &proto, TensorC
|
|||
}
|
||||
return RET_OK;
|
||||
}
|
||||
|
||||
void CaffeModelParser::ConvertCaffeBatchNorm(schema::MetaGraphT *meta_graph) {
|
||||
MS_ASSERT(meta_graph != nullptr);
|
||||
auto &nodes = meta_graph->nodes;
|
||||
for (auto &node : nodes) {
|
||||
if (node->primitive->value.type != schema::PrimitiveType_FusedBatchNorm) {
|
||||
continue;
|
||||
}
|
||||
MS_ASSERT(node->inputIndex.size() == 2);
|
||||
MS_ASSERT(node->inputIndex.back() < meta_graph->allTensors.size());
|
||||
auto &meanTensor = meta_graph->allTensors.at(node->inputIndex.back());
|
||||
MS_ASSERT(nullptr != meanTensor);
|
||||
auto shape = meanTensor->dims;
|
||||
auto shapeSize = GetShapeSize(shape);
|
||||
|
||||
auto scaleTensor = std::make_unique<schema::TensorT>();
|
||||
scaleTensor->dims = shape;
|
||||
scaleTensor->nodeType = NodeType_ValueNode;
|
||||
scaleTensor->refCount = 1;
|
||||
scaleTensor->format = schema::Format_NUM_OF_FORMAT;
|
||||
scaleTensor->dataType = TypeId::kNumberTypeFloat32;
|
||||
scaleTensor->data.resize(shapeSize * sizeof(float));
|
||||
auto scaleData = reinterpret_cast<float *>(scaleTensor->data.data());
|
||||
for (size_t i = 0; i < shapeSize; i++) {
|
||||
scaleData[i] = 1;
|
||||
}
|
||||
|
||||
auto biasTensor = std::make_unique<schema::TensorT>();
|
||||
biasTensor->dims = shape;
|
||||
biasTensor->nodeType = NodeType_ValueNode;
|
||||
biasTensor->refCount = 1;
|
||||
biasTensor->format = schema::Format_NUM_OF_FORMAT;
|
||||
biasTensor->dataType = TypeId::kNumberTypeInt32;
|
||||
biasTensor->data.resize(shapeSize * sizeof(int32_t));
|
||||
auto biasData = reinterpret_cast<int32_t *>(biasTensor->data.data());
|
||||
for (size_t i = 0; i < shapeSize; i++) {
|
||||
biasData[i] = 0;
|
||||
}
|
||||
|
||||
node->inputIndex.insert(node->inputIndex.begin() + 1, meta_graph->allTensors.size());
|
||||
meta_graph->allTensors.emplace_back(std::move(biasTensor));
|
||||
|
||||
node->inputIndex.insert(node->inputIndex.begin() + 1, meta_graph->allTensors.size());
|
||||
meta_graph->allTensors.emplace_back(std::move(scaleTensor));
|
||||
}
|
||||
}
|
||||
} // namespace lite
|
||||
} // namespace mindspore
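The ConvertCaffeBatchNorm pass above rewrites each FusedBatchNorm node by splicing a constant all-ones scale tensor and a constant all-zeros bias tensor in front of the mean/variance inputs. A minimal standalone sketch of that constant-filler step, using a plain byte buffer instead of schema::TensorT (the helper name is illustrative, not the converter's API; the pass itself uses float data for the scale and int32 data for the bias):

#include <cstdint>
#include <vector>

// Fill a raw byte buffer with `count` copies of a float `value`, in the same
// spirit as the constant scale/bias buffers built in ConvertCaffeBatchNorm.
static std::vector<uint8_t> MakeConstantFloatBuffer(size_t count, float value) {
  std::vector<uint8_t> data(count * sizeof(float));
  auto *dst = reinterpret_cast<float *>(data.data());
  for (size_t i = 0; i < count; ++i) {
    dst[i] = value;
  }
  return data;
}

int main() {
  const size_t channels = 16;  // e.g. the length of the mean tensor's dims
  std::vector<uint8_t> scale = MakeConstantFloatBuffer(channels, 1.0f);  // identity scale
  std::vector<uint8_t> bias = MakeConstantFloatBuffer(channels, 0.0f);   // zero offset
  return scale.size() == bias.size() ? 0 : 1;
}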
@ -14,8 +14,8 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_MODEL_PARSER_H_
|
||||
#define MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_MODEL_PARSER_H_
|
||||
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_MODEL_PARSER_H_
|
||||
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_MODEL_PARSER_H_
|
||||
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
@ -37,8 +37,6 @@ class CaffeModelParser : public ModelParser {
|
|||
const QuantType &quantType = QuantType_QUANT_NONE) override;
|
||||
|
||||
private:
|
||||
void ConvertCaffeBatchNorm(MetaGraphT *meta_graphT);
|
||||
|
||||
STATUS SetOpInputIdx(const caffe::LayerParameter &layer, schema::CNodeT *op, TensorCache *tensorCache);
|
||||
|
||||
STATUS SetOpOutputIdx(const caffe::LayerParameter &layer, schema::CNodeT *op, TensorCache *tensorCache);
|
||||
|
@ -61,5 +59,5 @@ class CaffeModelParser : public ModelParser {
|
|||
} // namespace lite
|
||||
} // namespace mindspore
|
||||
|
||||
#endif // MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_MODEL_PARSER_H_
|
||||
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_MODEL_PARSER_H_
|
||||
|
||||
|
|
|
@ -14,8 +14,8 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <memory>
|
||||
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h"
|
||||
#include <memory>
|
||||
#include "securec/include/securec.h"
|
||||
#include "ir/dtype/type_id.h"
|
||||
|
||||
|
@ -35,11 +35,11 @@ schema::TensorT *ConvertWeight(const caffe::BlobProto &proto) {
|
|||
for (size_t i = 0; i < shapeVec.size(); ++i) {
|
||||
int dim = shapeVec[i];
|
||||
if (dim <= 0) {
|
||||
// MS_LOGE("Convert weight fail, Blob size invalid");
|
||||
MS_LOG(ERROR) << "Convert weight fail, Blob size invalid";
|
||||
return nullptr;
|
||||
}
|
||||
if (dim >= INT_MAX / count) {
|
||||
// MS_LOGE("Convert weight fail, Blob size exceeds INT_MAX, dim:%d, count:%d", dim, count);
|
||||
MS_LOG(ERROR) << "Convert weight fail, Blob size exceeds INT_MAX, dim:" << dim << "count:" << count;
|
||||
return nullptr;
|
||||
}
|
||||
count *= dim;
|
||||
|
@ -53,8 +53,8 @@ schema::TensorT *ConvertWeight(const caffe::BlobProto &proto) {
|
|||
if (proto.double_data_size() > 0) {
|
||||
// datatype double
|
||||
if (count != proto.double_data_size()) {
|
||||
// MS_LOGE("Convert weight fail, Blob size does not match shape size, shape size:%d, blob size:%d", count,
|
||||
// proto.double_data_size());
|
||||
MS_LOG(ERROR) << "Convert weight fail, Blob size does not match shape size, shape size: " << count
|
||||
<< "blob size:" << proto.double_data_size();
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
|
@@ -68,8 +68,8 @@ schema::TensorT *ConvertWeight(const caffe::BlobProto &proto) {
} else {
// datatype float
if (count != proto.data_size()) {
// MS_LOGE("Convert weight fail, Blob size does not match shape size, shape size:%d, blob.data_size:%d", count,
// proto.data_size());
MS_LOG(ERROR) << "Convert weight fail, Blob size does not match shape size, shape size: " << count
<< ", blob.data_size: " << proto.data_size();
return nullptr;
}
weight->data.resize(count * sizeof(float));
@ -81,11 +81,11 @@ schema::TensorT *ConvertWeight(const caffe::BlobProto &proto) {
|
|||
return weight.release();
|
||||
}
|
||||
|
||||
STATUS ConvertShape(const caffe::BlobProto &proto, std::vector<int32_t> *shape) {
|
||||
STATUS ConvertShape(const caffe::BlobProto &proto,
|
||||
std::vector<int32_t> *shape) {
|
||||
shape->clear();
|
||||
|
||||
if (proto.has_num() || proto.has_channels() || proto.has_height() || proto.has_width()) {
|
||||
// num, channels, height, width
|
||||
shape->push_back(proto.num());
|
||||
shape->push_back(proto.channels());
|
||||
shape->push_back(proto.height());
|
||||
|
@ -99,5 +99,4 @@ STATUS ConvertShape(const caffe::BlobProto &proto, std::vector<int32_t> *shape)
|
|||
}
|
||||
} // namespace lite
|
||||
} // namespace mindspore
//
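ConvertWeight above rejects non-positive blob dimensions and guards the running element count against integer overflow before each multiplication. A self-contained sketch of that check, using only standard C++ (the helper name is illustrative):

#include <climits>
#include <cstdio>
#include <vector>

// Overflow-guarded element count, following the check used in ConvertWeight:
// bail out before `count * dim` could exceed INT_MAX.
static bool ShapeElementCount(const std::vector<int> &shape, int *count) {
  *count = 1;
  for (int dim : shape) {
    if (dim <= 0) {
      return false;  // invalid blob dimension
    }
    if (dim >= INT_MAX / *count) {
      return false;  // multiplication would overflow
    }
    *count *= dim;
  }
  return true;
}

int main() {
  int count = 0;
  if (ShapeElementCount({64, 3, 7, 7}, &count)) {
    std::printf("elements: %d\n", count);  // 9408
  }
  return 0;
}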
@ -14,8 +14,8 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_NODE_PARSER_H_
|
||||
#define MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_NODE_PARSER_H_
|
||||
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_NODE_PARSER_H_
|
||||
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_NODE_PARSER_H_
|
||||
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
@ -34,8 +34,10 @@ class CaffeNodeParser {
|
|||
|
||||
virtual ~CaffeNodeParser() {}
|
||||
|
||||
virtual int Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
|
||||
std::vector<schema::TensorT *> *weightVec) = 0;
|
||||
virtual int Parse(const caffe::LayerParameter &proto,
|
||||
const caffe::LayerParameter &weight,
|
||||
schema::CNodeT *op,
|
||||
std::vector<schema::TensorT *> *weightVec) = 0;
|
||||
|
||||
protected:
|
||||
const std::string &name;
|
||||
|
@ -43,9 +45,10 @@ class CaffeNodeParser {
|
|||
|
||||
schema::TensorT *ConvertWeight(const caffe::BlobProto &proto);
|
||||
|
||||
STATUS ConvertShape(const caffe::BlobProto &proto, std::vector<int32_t> *shape);
|
||||
STATUS ConvertShape(const caffe::BlobProto &proto,
|
||||
std::vector<int32_t> *shape);
|
||||
} // namespace lite
|
||||
} // namespace mindspore
|
||||
|
||||
#endif // MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_NODE_PARSER_H_
|
||||
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_NODE_PARSER_H_
|
||||
|
||||
|
|
|
@@ -20,7 +20,14 @@ namespace mindspore {
namespace lite {
CaffeNodeParserRegistry::CaffeNodeParserRegistry() {}

CaffeNodeParserRegistry::~CaffeNodeParserRegistry() {}
CaffeNodeParserRegistry::~CaffeNodeParserRegistry() {
for (auto ite : parsers) {
if (ite.second != nullptr) {
delete ite.second;
ite.second = nullptr;
}
}
}

CaffeNodeParserRegistry *CaffeNodeParserRegistry::GetInstance() {
static CaffeNodeParserRegistry instance;

@ -14,8 +14,8 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_NODE_PARSER_REGISTRY_H_
|
||||
#define MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_NODE_PARSER_REGISTRY_H_
|
||||
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_NODE_PARSER_REGISTRY_H_
|
||||
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_NODE_PARSER_REGISTRY_H_
|
||||
|
||||
#include <string>
|
||||
#include <unordered_map>
|
||||
|
@ -44,5 +44,5 @@ class CaffeNodeRegistrar {
|
|||
};
|
||||
} // namespace mindspore::lite
|
||||
|
||||
#endif // MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_NODE_PARSER_REGISTRY_H_
|
||||
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_NODE_PARSER_REGISTRY_H_
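CaffeNodeParserRegistry keeps a name-to-parser table that registrar objects fill in during static initialization, and with this change its destructor also deletes the registered parsers. A simplified, self-contained sketch of that pattern; the class and member names below are illustrative, not the converter's exact layout:

#include <iostream>
#include <string>
#include <unordered_map>

struct NodeParser {
  virtual ~NodeParser() = default;
  virtual void Parse() const = 0;
};

struct ReluParser : NodeParser {
  void Parse() const override { std::cout << "parse relu\n"; }
};

class Registry {
 public:
  static Registry *GetInstance() {
    static Registry instance;
    return &instance;
  }
  // Owns the registered parsers, like the destructor added above.
  ~Registry() {
    for (auto &kv : parsers_) delete kv.second;
  }
  void Register(const std::string &type, NodeParser *parser) { parsers_[type] = parser; }
  NodeParser *Get(const std::string &type) const {
    auto it = parsers_.find(type);
    return it == parsers_.end() ? nullptr : it->second;
  }

 private:
  std::unordered_map<std::string, NodeParser *> parsers_;
};

// A registrar registers its parser as a side effect of static initialization.
struct Registrar {
  Registrar(const std::string &type, NodeParser *parser) {
    Registry::GetInstance()->Register(type, parser);
  }
};
static Registrar g_relu("ReLU", new ReluParser());

int main() {
  if (auto *p = Registry::GetInstance()->Get("ReLU")) p->Parse();
  return 0;
}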
@ -20,7 +20,6 @@
|
|||
#include "google/protobuf/io/zero_copy_stream_impl.h"
|
||||
#include "google/protobuf/text_format.h"
|
||||
#include "google/protobuf/io/coded_stream.h"
|
||||
#include "securec/include/securec.h"
|
||||
#include "src/common/file_utils.h"
|
||||
|
||||
namespace mindspore {
|
||||
|
@ -31,57 +30,58 @@ static const int WARNING_THRESHOLD = 536870912 * 2;
|
|||
bool ReadProtoFromCodedInputStream(google::protobuf::io::CodedInputStream *coded_stream,
|
||||
google::protobuf::Message *proto) {
|
||||
if (proto == nullptr) {
|
||||
// MS_LOGE("incorrect parameter. nullptr == proto");
|
||||
MS_LOG(ERROR) << "incorrect parameter. nullptr == proto";
|
||||
return false;
|
||||
}
|
||||
coded_stream->SetTotalBytesLimit(PROTO_READ_BYTES_LIMIT, WARNING_THRESHOLD);
|
||||
return proto->ParseFromCodedStream(coded_stream);
|
||||
}
|
||||
|
||||
STATUS ReadProtoFromText(const char *file, google::protobuf::Message *message) {
|
||||
STATUS ReadProtoFromText(const char *file,
|
||||
google::protobuf::Message *message) {
|
||||
if (file == nullptr || message == nullptr) {
|
||||
return RET_ERROR;
|
||||
}
|
||||
|
||||
std::string realPath = RealPath(file);
|
||||
if (realPath.empty()) {
|
||||
// MS_LOGE("Proto file path is '%s' not valid", file);
|
||||
MS_LOG(ERROR) << "Proto file path " << file <<" is not valid";
|
||||
return RET_ERROR;
|
||||
}
|
||||
|
||||
std::ifstream fs(realPath.c_str(), std::ifstream::in);
|
||||
|
||||
if (!fs.is_open()) {
|
||||
// MS_LOGE("Open proto file '%s' failed.", file);
|
||||
MS_LOG(ERROR) << "Open proto file " << file << " failed.";
|
||||
return RET_ERROR;
|
||||
}
|
||||
|
||||
google::protobuf::io::IstreamInputStream input(&fs);
|
||||
bool status = google::protobuf::TextFormat::Parse(&input, message);
|
||||
if (status != true) {
|
||||
// MS_LOGE("call [google::protobuf::TextFormat::Parse] func status fail, please check your text file.");
|
||||
if (!status) {
|
||||
MS_LOG(ERROR) << "call [google::protobuf::TextFormat::Parse] func status fail, please check your text file.";
|
||||
return RET_ERROR;
|
||||
}
|
||||
|
||||
fs.close();
|
||||
|
||||
return RET_OK;
|
||||
}
|
||||
|
||||
STATUS ReadProtoFromBinaryFile(const char *file, google::protobuf::Message *message) {
|
||||
STATUS ReadProtoFromBinaryFile(const char *file,
|
||||
google::protobuf::Message *message) {
|
||||
if (file == nullptr || message == nullptr) {
|
||||
return RET_ERROR;
|
||||
}
|
||||
|
||||
std::string realPath = RealPath(file);
|
||||
if (realPath.empty()) {
|
||||
// MS_LOGE("Weight file path is '%s' not valid", file);
|
||||
MS_LOG(ERROR) << "Weight file path " << file << " is not valid";
|
||||
return RET_ERROR;
|
||||
}
|
||||
|
||||
std::ifstream fs(realPath, std::ifstream::in | std::ifstream::binary);
|
||||
if (!fs.is_open()) {
|
||||
// MS_LOGE("Open weight file '%s' failed.", file);
|
||||
MS_LOG(ERROR) << "Open weight file " << file << " failed.";
|
||||
return RET_ERROR;
|
||||
}
|
||||
|
||||
|
@ -92,7 +92,7 @@ STATUS ReadProtoFromBinaryFile(const char *file, google::protobuf::Message *mess
|
|||
fs.close();
|
||||
|
||||
if (!success) {
|
||||
// MS_LOGE("Parse %s failed.", file);
|
||||
MS_LOG(ERROR) << "Parse " << file << " failed.";
|
||||
return RET_ERROR;
|
||||
}
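A hedged usage sketch for the two helpers above, loading a Caffe prototxt and a binary caffemodel into NetParameter messages. The file names are placeholders, the include path depends on the build tree, and RET_OK is assumed to come from the errorcode header that caffe_parse_utils.h already includes:

#include <iostream>

#include "mindspore/lite/tools/converter/parser/caffe/caffe_parse_utils.h"

int main() {
  caffe::NetParameter proto;   // network definition
  caffe::NetParameter weight;  // trained weights
  // "model.prototxt" / "model.caffemodel" are placeholder paths.
  if (mindspore::lite::ReadProtoFromText("model.prototxt", &proto) != mindspore::lite::RET_OK) {
    std::cerr << "read prototxt failed" << std::endl;
    return 1;
  }
  if (mindspore::lite::ReadProtoFromBinaryFile("model.caffemodel", &weight) != mindspore::lite::RET_OK) {
    std::cerr << "read caffemodel failed" << std::endl;
    return 1;
  }
  std::cout << "layer count: " << proto.layer_size() << std::endl;
  return 0;
}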
@ -14,13 +14,12 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_PARSE_UTILS_H_
|
||||
#define MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_PARSE_UTILS_H_
|
||||
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_PARSE_UTILS_H_
|
||||
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_PARSE_UTILS_H_
|
||||
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include "google/protobuf/message.h"
|
||||
|
||||
#include "tools/converter/parser/caffe/caffe.pb.h"
|
||||
#include "include/errorcode.h"
|
||||
#include "mindspore/lite/schema/inner/model_generated.h"
|
||||
|
@ -30,11 +29,13 @@ namespace lite {
|
|||
bool ReadProtoFromCodedInputStream(google::protobuf::io::CodedInputStream *coded_stream,
|
||||
google::protobuf::Message *proto);
|
||||
|
||||
STATUS ReadProtoFromText(const char *file, google::protobuf::Message *message);
|
||||
STATUS ReadProtoFromText(const char *file,
|
||||
google::protobuf::Message *message);
|
||||
|
||||
STATUS ReadProtoFromBinaryFile(const char *file, google::protobuf::Message *message);
|
||||
STATUS ReadProtoFromBinaryFile(const char *file,
|
||||
google::protobuf::Message *message);
|
||||
} // namespace lite
|
||||
} // namespace mindspore
|
||||
|
||||
#endif // MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_PARSE_UTILS_H_
|
||||
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_PARSE_UTILS_H_
|
||||
|
||||
|
|
|
@ -23,10 +23,24 @@ STATUS CaffePermuteParser::Parse(const caffe::LayerParameter &proto,
|
|||
const caffe::LayerParameter &weight,
|
||||
schema::CNodeT *op,
|
||||
std::vector<schema::TensorT *> *weightVec) {
|
||||
op->name = proto.name();
|
||||
std::unique_ptr<schema::TransposeT> attr = std::make_unique<schema::TransposeT>();
|
||||
const caffe::PermuteParameter permuteParam = proto.permute_param();
|
||||
MS_LOG(DEBUG) << "parse CaffePermuteParser";
|
||||
if (op == nullptr) {
|
||||
MS_LOG(ERROR) << "op is null";
|
||||
return RET_NULL_PTR;
|
||||
}
|
||||
op->primitive = std::make_unique<schema::PrimitiveT>();
|
||||
if (op->primitive == nullptr) {
|
||||
MS_LOG(ERROR) << "op->primitive is null";
|
||||
return RET_NULL_PTR;
|
||||
}
|
||||
|
||||
std::unique_ptr<schema::TransposeT> attr = std::make_unique<schema::TransposeT>();
|
||||
if (attr == nullptr) {
|
||||
MS_LOG(ERROR) << "new op failed";
|
||||
return RET_NULL_PTR;
|
||||
}
|
||||
|
||||
const caffe::PermuteParameter permuteParam = proto.permute_param();
|
||||
const int num_order_dims = permuteParam.order_size();
|
||||
attr->perm.resize(num_order_dims);
|
||||
for (int i = 0; i < num_order_dims; ++i) {
|
||||
|
@ -34,9 +48,9 @@ STATUS CaffePermuteParser::Parse(const caffe::LayerParameter &proto,
|
|||
}
|
||||
attr->conjugate = false;
|
||||
|
||||
op->primitive = std::make_unique<schema::PrimitiveT>();
|
||||
op->primitive->value.value = attr.release();
|
||||
op->name = proto.name();
|
||||
op->primitive->value.type = schema::PrimitiveType_Transpose;
|
||||
op->primitive->value.value = attr.release();
|
||||
return RET_OK;
|
||||
}
|
||||
|
||||
|
|
|
@ -14,8 +14,8 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_PERMUTE_PARSER_H_
|
||||
#define MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_PERMUTE_PARSER_H_
|
||||
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_PERMUTE_PARSER_H_
|
||||
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_PERMUTE_PARSER_H_
|
||||
|
||||
#include <vector>
|
||||
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h"
|
||||
|
@ -27,11 +27,13 @@ class CaffePermuteParser : public CaffeNodeParser {
|
|||
public:
|
||||
CaffePermuteParser() : CaffeNodeParser("Permute") {}
|
||||
|
||||
STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
|
||||
STATUS Parse(const caffe::LayerParameter &proto,
|
||||
const caffe::LayerParameter &weight,
|
||||
schema::CNodeT *op,
|
||||
std::vector<schema::TensorT *> *weightVec) override;
|
||||
};
|
||||
} // namespace lite
|
||||
} // namespace mindspore
|
||||
|
||||
#endif // MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_PERMUTE_PARSER_H_
|
||||
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_PERMUTE_PARSER_H_
|
||||
|
||||
|
|
|
@ -14,9 +14,8 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <memory>
|
||||
#include "mindspore/lite/tools/converter/parser/caffe/caffe_pooling_parser.h"
|
||||
#include "utils/log_adapter.h"
|
||||
#include <memory>
|
||||
|
||||
const uint32_t INNERPRODUCT_WINDOW_DEFAULT_VALUE = 0;
|
||||
const uint32_t INNERPRODUCT_PAD_DEFAULT_VALUE = 0;
|
||||
|
@ -27,32 +26,47 @@ STATUS CaffePoolingParser::Parse(const caffe::LayerParameter &proto,
|
|||
const caffe::LayerParameter &weight,
|
||||
schema::CNodeT *op,
|
||||
std::vector<schema::TensorT *> *weightVec) {
|
||||
MS_LOG(DEBUG) << "parse CaffePoolingParser";
|
||||
if (op == nullptr) {
|
||||
MS_LOG(ERROR) << "op is null";
|
||||
return RET_NULL_PTR;
|
||||
}
|
||||
op->primitive = std::make_unique<schema::PrimitiveT>();
|
||||
if (op->primitive == nullptr) {
|
||||
MS_LOG(ERROR) << "op->primitive is null";
|
||||
return RET_NULL_PTR;
|
||||
}
|
||||
|
||||
std::unique_ptr<schema::PoolingT> attr = std::make_unique<schema::PoolingT>();
|
||||
if (attr == nullptr) {
|
||||
MS_LOG(ERROR) << "new op failed";
|
||||
return RET_NULL_PTR;
|
||||
}
|
||||
|
||||
attr->format = schema::Format_NCHW;
|
||||
|
||||
const caffe::PoolingParameter poolingParam = proto.pooling_param();
|
||||
|
||||
auto status = ParsePads(poolingParam, attr.get());
|
||||
if (status != RET_OK) {
|
||||
// MS_LOGE("ParsePads for %s failed", proto.name().c_str());
|
||||
MS_LOG(ERROR) << "ParsePads for " << proto.name().c_str() << " failed";
|
||||
return RET_ERROR;
|
||||
}
|
||||
|
||||
status = ParseStrides(poolingParam, attr.get());
|
||||
if (status != RET_OK) {
|
||||
// MS_LOGE("ParseStrides for %s failed", proto.name().c_str());
|
||||
MS_LOG(ERROR) << "ParseStrides for " << proto.name().c_str() << " failed";
|
||||
return RET_ERROR;
|
||||
}
|
||||
|
||||
status = ParseWindows(poolingParam, attr.get());
|
||||
if (status != RET_OK) {
|
||||
// MS_LOGE("ParseWindows for %s failed", proto.name().c_str());
|
||||
MS_LOG(ERROR) << "ParseWindows for " << proto.name().c_str() << " failed";
|
||||
return RET_ERROR;
|
||||
}
|
||||
|
||||
status = ParsePoolingMode(poolingParam, attr.get());
|
||||
if (status != RET_OK) {
|
||||
// MS_LOGE("ParsePoolingMode for %s failed", proto.name().c_str());
|
||||
MS_LOG(ERROR) << "ParsePoolingMode for " << proto.name().c_str() << " failed";
|
||||
return RET_ERROR;
|
||||
}
|
||||
|
||||
|
@ -67,18 +81,19 @@ STATUS CaffePoolingParser::Parse(const caffe::LayerParameter &proto,
|
|||
MS_ASSERT(false);
|
||||
}
|
||||
}
|
||||
|
||||
attr->padMode = schema::PadMode_CAFFE;
|
||||
op->primitive = std::make_unique<schema::PrimitiveT>();
|
||||
op->primitive->value.value = attr.release();
|
||||
|
||||
op->name = proto.name();
|
||||
op->primitive->value.type = schema::PrimitiveType_Pooling;
|
||||
op->primitive->value.value = attr.release();
|
||||
return RET_OK;
|
||||
}
|
||||
|
||||
STATUS CaffePoolingParser::ParsePads(const caffe::PoolingParameter &poolingParam, schema::PoolingT *attr) {
|
||||
STATUS CaffePoolingParser::ParsePads(const caffe::PoolingParameter &poolingParam,
|
||||
schema::PoolingT *attr) {
|
||||
if (poolingParam.has_pad_h() && poolingParam.has_pad_w()) {
|
||||
if (poolingParam.has_pad()) {
|
||||
// MS_LOGE("Either pad or pad_h/w should be specified; not both");
|
||||
MS_LOG(ERROR) << "Either pad or pad_h/w should be specified; not both";
|
||||
return RET_ERROR;
|
||||
}
|
||||
attr->padLeft = poolingParam.pad_w();
|
||||
|
@ -94,10 +109,11 @@ STATUS CaffePoolingParser::ParsePads(const caffe::PoolingParameter &poolingParam
|
|||
return RET_OK;
|
||||
}
|
||||
|
||||
STATUS CaffePoolingParser::ParseStrides(const caffe::PoolingParameter &poolingParam, schema::PoolingT *attr) {
|
||||
STATUS CaffePoolingParser::ParseStrides(const caffe::PoolingParameter &poolingParam,
|
||||
schema::PoolingT *attr) {
|
||||
if (poolingParam.has_stride_h() && poolingParam.has_stride_w()) {
|
||||
if (poolingParam.has_stride()) {
|
||||
// MS_LOGE("Either stride or stride_h/w should be specified; not both");
|
||||
MS_LOG(ERROR) << "Either stride or stride_h/w should be specified; not both";
|
||||
return RET_ERROR;
|
||||
}
|
||||
attr->strideH = poolingParam.stride_h();
|
||||
|
@ -109,10 +125,11 @@ STATUS CaffePoolingParser::ParseStrides(const caffe::PoolingParameter &poolingPa
|
|||
return RET_OK;
|
||||
}
|
||||
|
||||
STATUS CaffePoolingParser::ParseWindows(const caffe::PoolingParameter &poolingParam, schema::PoolingT *attr) {
|
||||
STATUS CaffePoolingParser::ParseWindows(const caffe::PoolingParameter &poolingParam,
|
||||
schema::PoolingT *attr) {
|
||||
if (poolingParam.has_global_pooling() && poolingParam.global_pooling()) {
|
||||
if (poolingParam.has_kernel_size() || poolingParam.has_kernel_h() || poolingParam.has_kernel_w()) {
|
||||
// MS_LOGE("With Global_pooling: true Filter size cannot specified");
|
||||
MS_LOG(ERROR) << "With Global_pooling: true Filter size cannot specified";
|
||||
return RET_ERROR;
|
||||
}
|
||||
attr->windowH = INNERPRODUCT_WINDOW_DEFAULT_VALUE;
|
||||
|
@ -120,11 +137,11 @@ STATUS CaffePoolingParser::ParseWindows(const caffe::PoolingParameter &poolingPa
|
|||
attr->global = true;
|
||||
} else {
|
||||
if (poolingParam.has_kernel_size() == (poolingParam.has_kernel_h() || poolingParam.has_kernel_w())) {
|
||||
// MS_LOGE("Filter size is kernel_size OR kernel_h and kernel_w; not both");
|
||||
MS_LOG(ERROR) << "Filter size is kernel_size OR kernel_h and kernel_w; not both";
|
||||
return RET_ERROR;
|
||||
}
|
||||
if (!poolingParam.has_kernel_size() && !(poolingParam.has_kernel_h() && poolingParam.has_kernel_w())) {
|
||||
// MS_LOGE("For non-square filters both kernel_h and kernel_w are required.");
|
||||
MS_LOG(ERROR) << "For non-square filters both kernel_h and kernel_w are required.";
|
||||
return RET_ERROR;
|
||||
}
|
||||
|
||||
|
@ -139,13 +156,14 @@ STATUS CaffePoolingParser::ParseWindows(const caffe::PoolingParameter &poolingPa
|
|||
return RET_OK;
|
||||
}
|
||||
|
||||
STATUS CaffePoolingParser::ParsePoolingMode(const caffe::PoolingParameter &poolingParam, schema::PoolingT *attr) {
|
||||
STATUS CaffePoolingParser::ParsePoolingMode(const caffe::PoolingParameter &poolingParam,
|
||||
schema::PoolingT *attr) {
|
||||
if (poolingParam.pool() == caffe::PoolingParameter::MAX) {
|
||||
attr->poolingMode = schema::PoolMode_MAX_POOLING;
|
||||
} else if (poolingParam.pool() == caffe::PoolingParameter::AVE) {
|
||||
attr->poolingMode = schema::PoolMode_MEAN_POOLING;
|
||||
} else {
|
||||
// MS_LOGE("Pooling param`s PoolingMode is not MAX either AVE. MindSpore support MAX and AVE only.");
|
||||
MS_LOG(ERROR) << "Pooling param`s PoolingMode is not MAX either AVE. MindSpore support MAX and AVE only.";
|
||||
return RET_ERROR;
|
||||
}
|
||||
return RET_OK;
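ParsePads and ParseStrides above both enforce that the square field and the per-axis fields are mutually exclusive before filling the attribute. A small self-contained sketch of that validation rule, with made-up struct and function names standing in for the protobuf accessors:

#include <cstdio>
#include <optional>

// Either the square form (pad) or the per-axis form (pad_h / pad_w) may be set, never both.
struct PadSpec {
  std::optional<int> pad, pad_h, pad_w;
};

static bool ResolvePads(const PadSpec &p, int *left_right, int *top_bottom) {
  if (p.pad_h && p.pad_w) {
    if (p.pad) {
      return false;  // both forms supplied: reject, as the parser above does
    }
    *left_right = *p.pad_w;
    *top_bottom = *p.pad_h;
  } else {
    const int v = p.pad.value_or(0);  // square padding, defaulting to 0
    *left_right = v;
    *top_bottom = v;
  }
  return true;
}

int main() {
  int lr = 0, tb = 0;
  PadSpec per_axis{std::nullopt, 2, 3};
  std::printf("%d %d %d\n", ResolvePads(per_axis, &lr, &tb), lr, tb);  // 1 3 2
  return 0;
}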
@ -14,8 +14,8 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_POOLING_PARSER_H_
|
||||
#define MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_POOLING_PARSER_H_
|
||||
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_POOLING_PARSER_H_
|
||||
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_POOLING_PARSER_H_
|
||||
|
||||
#include <vector>
|
||||
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h"
|
||||
|
@ -27,19 +27,25 @@ class CaffePoolingParser : public CaffeNodeParser {
|
|||
public:
|
||||
CaffePoolingParser() : CaffeNodeParser("pooling") {}
|
||||
|
||||
STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
|
||||
STATUS Parse(const caffe::LayerParameter &proto,
|
||||
const caffe::LayerParameter &weight,
|
||||
schema::CNodeT *op,
|
||||
std::vector<schema::TensorT *> *weightVec) override;
|
||||
|
||||
STATUS ParsePads(const caffe::PoolingParameter &poolingParam, schema::PoolingT *attr);
|
||||
STATUS ParsePads(const caffe::PoolingParameter &poolingParam,
|
||||
schema::PoolingT *attr);
|
||||
|
||||
STATUS ParseStrides(const caffe::PoolingParameter &poolingParam, schema::PoolingT *attr);
|
||||
STATUS ParseStrides(const caffe::PoolingParameter &poolingParam,
|
||||
schema::PoolingT *attr);
|
||||
|
||||
STATUS ParseWindows(const caffe::PoolingParameter &poolingParam, schema::PoolingT *attr);
|
||||
STATUS ParseWindows(const caffe::PoolingParameter &poolingParam,
|
||||
schema::PoolingT *attr);
|
||||
|
||||
STATUS ParsePoolingMode(const caffe::PoolingParameter &poolingParam, schema::PoolingT *attr);
|
||||
STATUS ParsePoolingMode(const caffe::PoolingParameter &poolingParam,
|
||||
schema::PoolingT *attr);
|
||||
};
|
||||
} // namespace lite
|
||||
} // namespace mindspore
|
||||
|
||||
#endif // MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_POOLING_PARSER_H_
|
||||
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_POOLING_PARSER_H_
|
||||
|
||||
|
|
|
@ -14,8 +14,9 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <memory>
|
||||
#include "mindspore/lite/tools/converter/parser/caffe/caffe_power_parser.h"
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
static const float CAFFE_POWER_DEFAULT_POWER = 1.0;
|
||||
static const float CAFFE_POWER_DEFAULT_SCALE = 1.0;
|
||||
|
@ -27,7 +28,24 @@ STATUS CaffePowerParser::Parse(const caffe::LayerParameter &proto,
|
|||
const caffe::LayerParameter &weight,
|
||||
schema::CNodeT *op,
|
||||
std::vector<schema::TensorT *> *weightVec) {
|
||||
MS_LOG(DEBUG) << "parse CaffePowerParser";
|
||||
if (op == nullptr) {
|
||||
MS_LOG(ERROR) << "op is null";
|
||||
return RET_NULL_PTR;
|
||||
}
|
||||
op->primitive = std::make_unique<schema::PrimitiveT>();
|
||||
if (op->primitive == nullptr) {
|
||||
MS_LOG(ERROR) << "op->primitive is null";
|
||||
return RET_NULL_PTR;
|
||||
}
|
||||
|
||||
std::unique_ptr<schema::PowerT> attr = std::make_unique<schema::PowerT>();
|
||||
if (attr == nullptr) {
|
||||
MS_LOG(ERROR) << "new op failed";
|
||||
return RET_NULL_PTR;
|
||||
}
|
||||
|
||||
|
||||
const caffe::PowerParameter powerParam = proto.power_param();
|
||||
if (proto.has_power_param()) {
|
||||
attr->power = powerParam.has_power() ? powerParam.power() : CAFFE_POWER_DEFAULT_POWER;
|
||||
|
@ -38,7 +56,8 @@ STATUS CaffePowerParser::Parse(const caffe::LayerParameter &proto,
|
|||
attr->scale = CAFFE_POWER_DEFAULT_SCALE;
|
||||
attr->shift = CAFFE_POWER_DEFAULT_SHIFT;
|
||||
}
|
||||
op->primitive = std::make_unique<schema::PrimitiveT>();
|
||||
|
||||
op->name = proto.name();
|
||||
op->primitive->value.type = schema::PrimitiveType_Power;
|
||||
op->primitive->value.value = attr.release();
|
||||
return RET_OK;
|
||||
|
|
|
@ -14,8 +14,8 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_POWER_PARSER_H_
|
||||
#define MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_POWER_PARSER_H_
|
||||
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_POWER_PARSER_H_
|
||||
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_POWER_PARSER_H_
|
||||
|
||||
#include <vector>
|
||||
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h"
|
||||
|
@ -27,11 +27,13 @@ class CaffePowerParser : public CaffeNodeParser {
|
|||
public:
|
||||
CaffePowerParser() : CaffeNodeParser("power") {}
|
||||
|
||||
STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
|
||||
std::vector<schema::TensorT *> *weightVec) override;
|
||||
STATUS Parse(const caffe::LayerParameter &proto,
|
||||
const caffe::LayerParameter &weight,
|
||||
schema::CNodeT *op,
|
||||
std::vector<schema::TensorT *> *weightVec) override;
|
||||
};
|
||||
} // namespace lite
|
||||
} // namespace mindspore
|
||||
|
||||
#endif // MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_POWER_PARSER_H_
|
||||
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_POWER_PARSER_H_
|
||||
|
||||
|
|
|
@ -14,8 +14,8 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <memory>
|
||||
#include "mindspore/lite/tools/converter/parser/caffe/caffe_prelu_parser.h"
|
||||
#include <memory>
|
||||
|
||||
namespace mindspore {
|
||||
namespace lite {
|
||||
|
@ -23,7 +23,23 @@ STATUS CaffePReluParser::Parse(const caffe::LayerParameter &proto,
|
|||
const caffe::LayerParameter &weight,
|
||||
schema::CNodeT *op,
|
||||
std::vector<schema::TensorT *> *weightVec) {
|
||||
MS_LOG(DEBUG) << "parse CaffePReluParser";
|
||||
if (op == nullptr) {
|
||||
MS_LOG(ERROR) << "op is null";
|
||||
return RET_NULL_PTR;
|
||||
}
|
||||
op->primitive = std::make_unique<schema::PrimitiveT>();
|
||||
if (op->primitive == nullptr) {
|
||||
MS_LOG(ERROR) << "op->primitive is null";
|
||||
return RET_NULL_PTR;
|
||||
}
|
||||
|
||||
std::unique_ptr<schema::CaffePReLUT> attr = std::make_unique<schema::CaffePReLUT>();
|
||||
if (attr == nullptr) {
|
||||
MS_LOG(ERROR) << "new op failed";
|
||||
return RET_NULL_PTR;
|
||||
}
|
||||
|
||||
const caffe::PReLUParameter pReluParam = proto.prelu_param();
|
||||
if (pReluParam.has_channel_shared()) {
|
||||
attr->channelShared = pReluParam.channel_shared();
|
||||
|
@ -32,17 +48,18 @@ STATUS CaffePReluParser::Parse(const caffe::LayerParameter &proto,
|
|||
}
|
||||
|
||||
if (weight.blobs_size() == 0) {
|
||||
// MS_LOGE("PRelu No blobs data in layer %s", proto.name().c_str());
|
||||
MS_LOG(ERROR) << "PRelu No blobs data in layer " << proto.name().c_str();
|
||||
return RET_ERROR;
|
||||
}
|
||||
|
||||
auto slope = ConvertWeight(weight.blobs(0));
|
||||
if (slope == nullptr) {
|
||||
// MS_LOGE("CaffePRelu convert slope for layer %s failed.", weight.name().c_str());
|
||||
MS_LOG(ERROR) << "CaffePRelu convert slope for layer " << weight.name().c_str() << " failed.";
|
||||
return RET_ERROR;
|
||||
}
|
||||
weightVec->push_back(slope);
|
||||
op->primitive = std::make_unique<schema::PrimitiveT>();
|
||||
|
||||
op->name = proto.name();
|
||||
op->primitive->value.type = schema::PrimitiveType_CaffePReLU;
|
||||
op->primitive->value.value = attr.release();
|
||||
return RET_OK;
|
||||
|
|
|
@ -14,8 +14,8 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_PRELU_PARSER_H_
|
||||
#define MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_PRELU_PARSER_H_
|
||||
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_PRELU_PARSER_H_
|
||||
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_PRELU_PARSER_H_
|
||||
|
||||
#include <vector>
|
||||
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h"
|
||||
|
@ -27,11 +27,13 @@ class CaffePReluParser : public CaffeNodeParser {
|
|||
public:
|
||||
CaffePReluParser() : CaffeNodeParser("pRelu") {}
|
||||
|
||||
STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
|
||||
std::vector<schema::TensorT *> *weightVec) override;
|
||||
STATUS Parse(const caffe::LayerParameter &proto,
|
||||
const caffe::LayerParameter &weight,
|
||||
schema::CNodeT *op,
|
||||
std::vector<schema::TensorT *> *weightVec) override;
|
||||
};
|
||||
} // namespace lite
|
||||
} // namespace mindspore
|
||||
|
||||
#endif // MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_PRELU_PARSER_H_
|
||||
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_PRELU_PARSER_H_
|
||||
|
||||
|
|
|
@ -14,8 +14,8 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <memory>
|
||||
#include "mindspore/lite/tools/converter/parser/caffe/caffe_relu_parser.h"
|
||||
#include <memory>
|
||||
|
||||
namespace mindspore {
|
||||
namespace lite {
|
||||
|
@ -23,10 +23,24 @@ STATUS CaffeReluParser::Parse(const caffe::LayerParameter &proto,
|
|||
const caffe::LayerParameter &weight,
|
||||
schema::CNodeT *op,
|
||||
std::vector<schema::TensorT *> *weightVec) {
|
||||
MS_LOG(DEBUG) << "parse CaffeReluParser";
|
||||
if (op == nullptr) {
|
||||
MS_LOG(ERROR) << "op is null";
|
||||
return RET_NULL_PTR;
|
||||
}
|
||||
op->primitive = std::make_unique<schema::PrimitiveT>();
|
||||
if (op->primitive == nullptr) {
|
||||
MS_LOG(ERROR) << "op->primitive is null";
|
||||
return RET_NULL_PTR;
|
||||
}
|
||||
|
||||
std::unique_ptr<schema::ActivationT> attr = std::make_unique<schema::ActivationT>();
|
||||
if (attr == nullptr) {
|
||||
MS_LOG(ERROR) << "new op failed";
|
||||
return RET_NULL_PTR;
|
||||
}
|
||||
|
||||
attr->type = schema::ActivationType_RELU;
|
||||
// relu: negative_slope = 0, no parameter;
|
||||
// leakyrelu: negative_slope != 0;
|
||||
if (proto.has_relu_param() && proto.relu_param().has_negative_slope()) {
|
||||
float negative_slope = proto.relu_param().negative_slope();
|
||||
if (0 != negative_slope) {
|
||||
|
@ -34,9 +48,10 @@ STATUS CaffeReluParser::Parse(const caffe::LayerParameter &proto,
|
|||
attr->alpha = negative_slope;
|
||||
}
|
||||
}
|
||||
op->primitive = std::make_unique<schema::PrimitiveT>();
|
||||
op->primitive->value.value = attr.release();
|
||||
|
||||
op->name = proto.name();
|
||||
op->primitive->value.type = schema::PrimitiveType_Activation;
|
||||
op->primitive->value.value = attr.release();
|
||||
return RET_OK;
|
||||
}
|
||||
|
||||
|
|
|
@ -14,8 +14,8 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_RELU_PARSER_H_
|
||||
#define MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_RELU_PARSER_H_
|
||||
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_RELU_PARSER_H_
|
||||
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_RELU_PARSER_H_
|
||||
|
||||
#include <vector>
|
||||
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h"
|
||||
|
@ -27,11 +27,13 @@ class CaffeReluParser : public CaffeNodeParser {
|
|||
public:
|
||||
CaffeReluParser() : CaffeNodeParser("relu") {}
|
||||
|
||||
STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
|
||||
STATUS Parse(const caffe::LayerParameter &proto,
|
||||
const caffe::LayerParameter &weight,
|
||||
schema::CNodeT *op,
|
||||
std::vector<schema::TensorT *> *weightVec) override;
|
||||
};
|
||||
} // namespace lite
|
||||
} // namespace mindspore
|
||||
|
||||
#endif // MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_RELU_PARSER_H_
|
||||
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_RELU_PARSER_H_
|
||||
|
||||
|
|
|
@ -14,8 +14,8 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <memory>
|
||||
#include "mindspore/lite/tools/converter/parser/caffe/caffe_reshape_parser.h"
|
||||
#include <memory>
|
||||
|
||||
namespace mindspore {
|
||||
namespace lite {
|
||||
|
@ -23,12 +23,28 @@ STATUS CaffeReshapeParser::Parse(const caffe::LayerParameter &proto,
|
|||
const caffe::LayerParameter &weight,
|
||||
schema::CNodeT *op,
|
||||
std::vector<schema::TensorT *> *weightVec) {
|
||||
MS_LOG(DEBUG) << "parse CaffeReshapeParser";
|
||||
if (op == nullptr) {
|
||||
MS_LOG(ERROR) << "op is null";
|
||||
return RET_NULL_PTR;
|
||||
}
|
||||
op->primitive = std::make_unique<schema::PrimitiveT>();
|
||||
if (op->primitive == nullptr) {
|
||||
MS_LOG(ERROR) << "op->primitive is null";
|
||||
return RET_NULL_PTR;
|
||||
}
|
||||
|
||||
std::unique_ptr<schema::ReshapeT> attr = std::make_unique<schema::ReshapeT>();
|
||||
if (attr == nullptr) {
|
||||
MS_LOG(ERROR) << "new op failed";
|
||||
return RET_NULL_PTR;
|
||||
}
|
||||
|
||||
attr->format = schema::Format_NCHW;
|
||||
|
||||
const caffe::ReshapeParameter reshapeParam = proto.reshape_param();
|
||||
if (!reshapeParam.has_shape()) {
|
||||
// MS_LOGE("Reshape has no shape info, ret fail");
|
||||
MS_LOG(ERROR) << "Reshape has no shape info, ret fail";
|
||||
return RET_ERROR;
|
||||
}
|
||||
|
||||
|
@ -36,7 +52,8 @@ STATUS CaffeReshapeParser::Parse(const caffe::LayerParameter &proto,
|
|||
for (int i = 0; i < blob_shape.dim_size(); i++) {
|
||||
attr->shape.push_back(blob_shape.dim(i));
|
||||
}
|
||||
op->primitive = std::make_unique<schema::PrimitiveT>();
|
||||
|
||||
op->name = proto.name();
|
||||
op->primitive->value.type = schema::PrimitiveType_Reshape;
|
||||
op->primitive->value.value = attr.release();
|
||||
return RET_OK;
|
||||
|
|
|
@ -14,8 +14,8 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_RESHAPE_PARSER_H_
|
||||
#define MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_RESHAPE_PARSER_H_
|
||||
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_RESHAPE_PARSER_H_
|
||||
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_RESHAPE_PARSER_H_
|
||||
|
||||
#include <vector>
|
||||
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h"
|
||||
|
@ -27,11 +27,13 @@ class CaffeReshapeParser : public CaffeNodeParser {
|
|||
public:
|
||||
CaffeReshapeParser() : CaffeNodeParser("reshape") {}
|
||||
|
||||
STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
|
||||
STATUS Parse(const caffe::LayerParameter &proto,
|
||||
const caffe::LayerParameter &weight,
|
||||
schema::CNodeT *op,
|
||||
std::vector<schema::TensorT *> *weightVec) override;
|
||||
};
|
||||
} // namespace lite
|
||||
} // namespace mindspore
|
||||
|
||||
#endif // MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_RESHAPE_PARSER_H_
|
||||
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_RESHAPE_PARSER_H_
|
||||
|
||||
|
|
|
@ -14,21 +14,38 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <memory>
|
||||
#include "mindspore/lite/tools/converter/parser/caffe/caffe_scale_parser.h"
|
||||
#include <memory>
|
||||
|
||||
const int32_t NCHW_DIM_C = 1;
|
||||
const int32_t DIM_DEFAULT_SIZE = 4;
|
||||
|
||||
namespace mindspore {
|
||||
namespace lite {
|
||||
STATUS CaffeScaleParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight,
|
||||
schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) {
|
||||
STATUS CaffeScaleParser::Parse(const caffe::LayerParameter &proto,
|
||||
const caffe::LayerParameter &weight,
|
||||
schema::CNodeT *op,
|
||||
std::vector<schema::TensorT *> *weightVec) {
|
||||
MS_LOG(DEBUG) << "parse CaffeScaleParser";
|
||||
if (op == nullptr) {
|
||||
MS_LOG(ERROR) << "op is null";
|
||||
return RET_NULL_PTR;
|
||||
}
|
||||
op->primitive = std::make_unique<schema::PrimitiveT>();
|
||||
if (op->primitive == nullptr) {
|
||||
MS_LOG(ERROR) << "op->primitive is null";
|
||||
return RET_NULL_PTR;
|
||||
}
|
||||
|
||||
std::unique_ptr<schema::ScaleT> attr = std::make_unique<schema::ScaleT>();
|
||||
if (attr == nullptr) {
|
||||
MS_LOG(ERROR) << "new op failed";
|
||||
return RET_NULL_PTR;
|
||||
}
|
||||
|
||||
if (weight.blobs_size() + weight.bottom_size() < 2) {
|
||||
// MS_LOGE("Scale bottom size:%d, blobs size:%d invalid in layer %s", weight.bottom_size(), weight.blobs_size(),
|
||||
// weight.name().c_str());
|
||||
MS_LOG(ERROR) << "Scale bottom size:" << weight.bottom_size() << ", blobs size:" << weight.blobs_size()
|
||||
<< " invalid in layer " << weight.name().c_str();
|
||||
return RET_ERROR;
|
||||
}
|
||||
|
||||
|
@ -37,7 +54,8 @@ STATUS CaffeScaleParser::Parse(const caffe::LayerParameter &proto, const caffe::
|
|||
if (scaleParam.has_axis()) {
|
||||
uint32_t axis_index = NCHW_DIM_C;
|
||||
if (GetAxisIndex(scaleParam.axis(), &axis_index)) {
|
||||
// MS_LOGE("scale get axis failed for layer %s.", weight.name().c_str());
|
||||
MS_LOG(ERROR) << "scale get axis failed for layer " << weight.name().c_str();
|
||||
return RET_ERROR;
|
||||
}
|
||||
}
|
||||
attr->axis = axis;
|
||||
|
@ -46,14 +64,14 @@ STATUS CaffeScaleParser::Parse(const caffe::LayerParameter &proto, const caffe::
|
|||
if (weight.blobs().size() == 1) {
|
||||
auto scale = ConvertWeight(weight.blobs(0));
|
||||
if (scale == nullptr) {
|
||||
// MS_LOGE("Scale Convert blobs(0) for layer %s failed.", weight.name().c_str());
|
||||
MS_LOG(ERROR) << "Scale Convert blobs(0) for layer " << weight.name().c_str() << " failed.";
|
||||
return RET_ERROR;
|
||||
}
|
||||
weightVec->push_back(scale);
|
||||
} else if (weight.blobs().size() >= 2) {
|
||||
auto scale = ConvertWeight(weight.blobs(0));
|
||||
if (scale == nullptr) {
|
||||
// MS_LOGE("Scale Convert blobs(0) for layer %s failed.", weight.name().c_str());
|
||||
MS_LOG(ERROR) << "Scale Convert blobs(0) for layer " << weight.name().c_str() << " failed.";
|
||||
return RET_ERROR;
|
||||
}
|
||||
weightVec->push_back(scale);
|
||||
|
@ -63,26 +81,27 @@ STATUS CaffeScaleParser::Parse(const caffe::LayerParameter &proto, const caffe::
|
|||
if (scaleBias) {
|
||||
auto bias = ConvertWeight(weight.blobs_size() > 1 ? weight.blobs(1) : weight.blobs(0));
|
||||
if (bias == nullptr) {
|
||||
// MS_LOGE("Scale Convert blobs(1) for layer %s failed.", weight.name().c_str());
|
||||
MS_LOG(ERROR) << "Scale Convert blobs(1) for layer " << weight.name().c_str() << " failed.";
|
||||
return RET_ERROR;
|
||||
}
|
||||
weightVec->push_back(bias);
|
||||
}
|
||||
}
|
||||
op->primitive = std::make_unique<schema::PrimitiveT>();
|
||||
op->primitive->value.value = attr.release();
|
||||
|
||||
op->name = proto.name();
|
||||
op->primitive->value.type = schema::PrimitiveType_Scale;
|
||||
op->primitive->value.value = attr.release();
|
||||
return RET_OK;
|
||||
}
|
||||
|
||||
STATUS CaffeScaleParser::GetAxisIndex(const int32_t &axis, uint32_t *axis_index) {
if (axis < -DIM_DEFAULT_SIZE || axis >= DIM_DEFAULT_SIZE) {
// MS_LOGE("Scale axis value(%d) is not correct, ", axis);
return RET_PARAM_INVALID;
MS_LOG(ERROR) << "Scale axis value(" << axis << ") is not correct";
return RET_ERROR;
}

if (axis == -1) {
// MS_LOGW("axis with -1 may lead to calculation errors when input less than 4 dims.");
MS_LOG(WARNING) << "axis with -1 may lead to calculation errors when input less than 4 dims.";
}

*axis_index = (axis + DIM_DEFAULT_SIZE) % DIM_DEFAULT_SIZE;
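GetAxisIndex maps a Caffe axis in [-4, 4) onto an NCHW dimension index with (axis + 4) % 4, so axis 1 and axis -3 both resolve to the channel dimension. A tiny standalone example of that normalization (the helper name is illustrative):

#include <cstdio>

// Normalize a possibly-negative axis into [0, rank), as GetAxisIndex does for rank 4.
static unsigned NormalizeAxis(int axis, int rank = 4) {
  return static_cast<unsigned>((axis + rank) % rank);
}

int main() {
  // axis 1 -> 1 (channel), axis -1 -> 3, axis -3 -> 1
  std::printf("%u %u %u\n", NormalizeAxis(1), NormalizeAxis(-1), NormalizeAxis(-3));
  return 0;
}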
@ -14,8 +14,8 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_SCALE_PARSER_H_
|
||||
#define MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_SCALE_PARSER_H_
|
||||
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_SCALE_PARSER_H_
|
||||
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_SCALE_PARSER_H_
|
||||
|
||||
#include <vector>
|
||||
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h"
|
||||
|
@ -27,13 +27,16 @@ class CaffeScaleParser : public CaffeNodeParser {
|
|||
public:
|
||||
CaffeScaleParser() : CaffeNodeParser("scale") {}
|
||||
|
||||
STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
|
||||
STATUS Parse(const caffe::LayerParameter &proto,
|
||||
const caffe::LayerParameter &weight,
|
||||
schema::CNodeT *op,
|
||||
std::vector<schema::TensorT *> *weightVec) override;
|
||||
|
||||
STATUS GetAxisIndex(const int32_t &axis, uint32_t *axis_index);
|
||||
STATUS GetAxisIndex(const int32_t &axis,
|
||||
uint32_t *axis_index);
|
||||
};
|
||||
} // namespace lite
|
||||
} // namespace mindspore
|
||||
|
||||
#endif // MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_SCALE_PARSER_H_
|
||||
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_SCALE_PARSER_H_
|
||||
|
||||
|
|
|
@ -14,8 +14,8 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <memory>
|
||||
#include "mindspore/lite/tools/converter/parser/caffe/caffe_sigmoid_parser.h"
|
||||
#include <memory>
|
||||
|
||||
namespace mindspore {
|
||||
namespace lite {
|
||||
|
@ -23,11 +23,28 @@ STATUS CaffeSigmoidParser::Parse(const caffe::LayerParameter &proto,
|
|||
const caffe::LayerParameter &weight,
|
||||
schema::CNodeT *op,
|
||||
std::vector<schema::TensorT *> *weightVec) {
|
||||
std::unique_ptr<schema::ActivationT> attr = std::make_unique<schema::ActivationT>();
|
||||
attr->type = schema::ActivationType_SIGMOID;
|
||||
MS_LOG(DEBUG) << "parse CaffeSigmoidParser";
|
||||
if (op == nullptr) {
|
||||
MS_LOG(ERROR) << "op is null";
|
||||
return RET_NULL_PTR;
|
||||
}
|
||||
op->primitive = std::make_unique<schema::PrimitiveT>();
|
||||
op->primitive->value.value = attr.release();
|
||||
if (op->primitive == nullptr) {
|
||||
MS_LOG(ERROR) << "op->primitive is null";
|
||||
return RET_NULL_PTR;
|
||||
}
|
||||
|
||||
std::unique_ptr<schema::ActivationT> attr = std::make_unique<schema::ActivationT>();
|
||||
if (attr == nullptr) {
|
||||
MS_LOG(ERROR) << "new op failed";
|
||||
return RET_NULL_PTR;
|
||||
}
|
||||
|
||||
attr->type = schema::ActivationType_SIGMOID;
|
||||
|
||||
op->name = proto.name();
|
||||
op->primitive->value.type = schema::PrimitiveType_Activation;
|
||||
op->primitive->value.value = attr.release();
|
||||
return RET_OK;
|
||||
}
|
||||
|
||||
|
|
|
@ -14,8 +14,8 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_SIGMOID_PARSER_H_
|
||||
#define MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_SIGMOID_PARSER_H_
|
||||
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_SIGMOID_PARSER_H_
|
||||
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_SIGMOID_PARSER_H_
|
||||
|
||||
#include <vector>
|
||||
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h"
|
||||
|
@ -27,11 +27,13 @@ class CaffeSigmoidParser : public CaffeNodeParser {
|
|||
public:
|
||||
CaffeSigmoidParser() : CaffeNodeParser("sigmoid") {}
|
||||
|
||||
STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
|
||||
STATUS Parse(const caffe::LayerParameter &proto,
|
||||
const caffe::LayerParameter &weight,
|
||||
schema::CNodeT *op,
|
||||
std::vector<schema::TensorT *> *weightVec) override;
|
||||
};
|
||||
} // namespace lite
|
||||
} // namespace mindspore
|
||||
|
||||
#endif // MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_SIGMOID_PARSER_H_
|
||||
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_SIGMOID_PARSER_H_
|
||||
|
||||
|
|
|
@ -14,9 +14,8 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <memory>
|
||||
#include "mindspore/lite/tools/converter/parser/caffe/caffe_softmax_parser.h"
|
||||
#include "utils/log_adapter.h"
|
||||
#include <memory>
|
||||
|
||||
static const int32_t CAFFE_SOFTMAX_DEFAULT_AXIS = 1;
|
||||
|
||||
|
@ -26,7 +25,23 @@ STATUS CaffeSoftmaxParser::Parse(const caffe::LayerParameter &proto,
|
|||
const caffe::LayerParameter &weight,
|
||||
schema::CNodeT *op,
|
||||
std::vector<schema::TensorT *> *weightVec) {
|
||||
MS_LOG(DEBUG) << "parse CaffeSoftmaxParser";
|
||||
if (op == nullptr) {
|
||||
MS_LOG(ERROR) << "op is null";
|
||||
return RET_NULL_PTR;
|
||||
}
|
||||
op->primitive = std::make_unique<schema::PrimitiveT>();
|
||||
if (op->primitive == nullptr) {
|
||||
MS_LOG(ERROR) << "op->primitive is null";
|
||||
return RET_NULL_PTR;
|
||||
}
|
||||
|
||||
std::unique_ptr<schema::SoftMaxT> attr = std::make_unique<schema::SoftMaxT>();
|
||||
if (attr == nullptr) {
|
||||
MS_LOG(ERROR) << "new op failed";
|
||||
return RET_NULL_PTR;
|
||||
}
|
||||
|
||||
if (proto.has_softmax_param() && proto.softmax_param().has_axis()) {
|
||||
if (proto.softmax_param().axis() == -1) {
|
||||
MS_LOG(ERROR) << "axis with -1 may lead to calculation errors when input less than 4 dims.";
|
||||
|
@ -35,9 +50,10 @@ STATUS CaffeSoftmaxParser::Parse(const caffe::LayerParameter &proto,
|
|||
} else {
|
||||
attr->axis = CAFFE_SOFTMAX_DEFAULT_AXIS;
|
||||
}
|
||||
op->primitive = std::make_unique<schema::PrimitiveT>();
|
||||
op->primitive->value.value = attr.release();
|
||||
|
||||
op->name = proto.name();
|
||||
op->primitive->value.type = schema::PrimitiveType_SoftMax;
|
||||
op->primitive->value.value = attr.release();
|
||||
return RET_OK;
|
||||
}
|
||||
|
||||
|
|
|
@ -14,8 +14,8 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_SOFTMAX_PARSER_H_
|
||||
#define MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_SOFTMAX_PARSER_H_
|
||||
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_SOFTMAX_PARSER_H_
|
||||
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_SOFTMAX_PARSER_H_
|
||||
|
||||
#include <vector>
|
||||
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h"
|
||||
|
@ -27,11 +27,13 @@ class CaffeSoftmaxParser : public CaffeNodeParser {
|
|||
public:
|
||||
CaffeSoftmaxParser() : CaffeNodeParser("softmax") {}
|
||||
|
||||
STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
|
||||
STATUS Parse(const caffe::LayerParameter &proto,
|
||||
const caffe::LayerParameter &weight,
|
||||
schema::CNodeT *op,
|
||||
std::vector<schema::TensorT *> *weightVec) override;
|
||||
};
|
||||
} // namespace lite
|
||||
} // namespace mindspore
|
||||
|
||||
#endif // MINDSPORE_CCSRC_TOOLS_LITE_CONVERTER_PARSER_CAFFE_CAFFE_SOFTMAX_PARSER_H_
|
||||
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_SOFTMAX_PARSER_H_
|
||||
|
||||
|
|
|
@ -14,9 +14,9 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "mindspore/lite/tools/converter/parser/caffe/caffe_tile_parser.h"
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
#include "mindspore/lite/tools/converter/parser/caffe/caffe_tile_parser.h"
|
||||
|
||||
namespace mindspore {
|
||||
namespace lite {
|
||||
|
@ -24,9 +24,24 @@ STATUS CaffeTileParser::Parse(const caffe::LayerParameter &proto,
|
|||
const caffe::LayerParameter &weight,
|
||||
schema::CNodeT *op,
|
||||
std::vector<schema::TensorT *> *weightVec) {
|
||||
std::unique_ptr<schema::TileT> attr = std::make_unique<schema::TileT>();
|
||||
const caffe::TileParameter tile_param = proto.tile_param();
|
||||
MS_LOG(DEBUG) << "parse CaffeTileParser";
|
||||
if (op == nullptr) {
|
||||
MS_LOG(ERROR) << "op is null";
|
||||
return RET_NULL_PTR;
|
||||
}
|
||||
op->primitive = std::make_unique<schema::PrimitiveT>();
|
||||
if (op->primitive == nullptr) {
|
||||
MS_LOG(ERROR) << "op->primitive is null";
|
||||
return RET_NULL_PTR;
|
||||
}
|
||||
|
||||
std::unique_ptr<schema::TileT> attr = std::make_unique<schema::TileT>();
|
||||
if (attr == nullptr) {
|
||||
MS_LOG(ERROR) << "new op failed";
|
||||
return RET_NULL_PTR;
|
||||
}
|
||||
|
||||
const caffe::TileParameter tile_param = proto.tile_param();
|
||||
std::vector<int> dims;
|
||||
std::vector<int> multiples;
|
||||
dims.clear();
|
||||
|
@ -41,11 +56,12 @@ STATUS CaffeTileParser::Parse(const caffe::LayerParameter &proto,
|
|||
} else {
|
||||
multiples.push_back(1);
|
||||
}
|
||||
|
||||
attr->dims = dims;
|
||||
attr->multiples = multiples;
|
||||
op->primitive = std::make_unique<schema::PrimitiveT>();
|
||||
op->primitive->value.value = attr.release();
|
||||
|
||||
op->primitive->value.type = schema::PrimitiveType_Tile;
|
||||
op->primitive->value.value = attr.release();
|
||||
return RET_OK;
|
||||
}
|
||||
|
||||
|
|
|
@ -14,8 +14,8 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef LITE_CAFFE_TILE_PARSER_H
|
||||
#define LITE_CAFFE_TILE_PARSER_H
|
||||
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_TILE_PARSER_H
|
||||
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_TILE_PARSER_H
|
||||
|
||||
#include <vector>
|
||||
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h"
|
||||
|
@ -27,10 +27,12 @@ class CaffeTileParser : public CaffeNodeParser {
|
|||
public:
|
||||
CaffeTileParser() : CaffeNodeParser("tile") {}
|
||||
|
||||
STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
|
||||
STATUS Parse(const caffe::LayerParameter &proto,
|
||||
const caffe::LayerParameter &weight,
|
||||
schema::CNodeT *op,
|
||||
std::vector<schema::TensorT *> *weightVec) override;
|
||||
};
|
||||
} // namespace lite
|
||||
} // namespace mindspore
|
||||
|
||||
#endif // LITE_CAFFE_TILE_PARSER_H
|
||||
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_TILE_PARSER_H
|
||||
|
|
|
@ -38,7 +38,12 @@ STATUS TfliteActivationParser::Parse(const std::unique_ptr<tflite::OperatorT> &t
|
|||
MS_LOG(ERROR) << "op->primitive is null";
|
||||
return RET_NULL_PTR;
|
||||
}
|
||||
|
||||
std::unique_ptr<schema::ActivationT> attr = std::make_unique<schema::ActivationT>();
|
||||
if (attr == nullptr) {
|
||||
MS_LOG(ERROR) << "new op failed";
|
||||
return RET_NULL_PTR;
|
||||
}
|
||||
|
||||
std::vector<std::string> node_name_str;
|
||||
Split(op->name, &node_name_str, "-");
|
||||
|
@ -46,6 +51,7 @@ STATUS TfliteActivationParser::Parse(const std::unique_ptr<tflite::OperatorT> &t
|
|||
if (std::strcmp(node_name, "Relu") == 0) {
|
||||
MS_LOG(DEBUG) << "parse TfliteReluParser";
|
||||
attr->type = schema::ActivationType_RELU;
|
||||
|
||||
} else if (std::strcmp(node_name, "Relu6") == 0) {
|
||||
MS_LOG(DEBUG) << "parse TfliteRelu6Parser";
|
||||
attr->type = schema::ActivationType_RELU6;
|
||||
|
@@ -58,9 +64,16 @@ STATUS TfliteActivationParser::Parse(const std::unique_ptr<tflite::OperatorT> &t
} else if (std::strcmp(node_name, "HardSwish") == 0) {
MS_LOG(DEBUG) << "parse TfliteHardSwishParser";
attr->type = schema::ActivationType_SIGMOID;
} else if (std::strcmp(node_name, "LeakyRelu") == 0) {
const auto &tflite_attr = tflite_op->builtin_options.AsLeakyReluOptions();
if (tflite_attr == nullptr) {
MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
return RET_NULL_PTR;
}
attr->alpha = tflite_attr->alpha;
attr->type = schema::ActivationType_SIGMOID;
}

attr->alpha = 0.2f;
op->primitive->value.type = schema::PrimitiveType_Activation;
op->primitive->value.value = attr.release();

@ -89,7 +102,12 @@ STATUS TflitePreluParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite
|
|||
MS_LOG(ERROR) << "op->primitive is null";
|
||||
return RET_NULL_PTR;
|
||||
}
|
||||
|
||||
std::unique_ptr<schema::PreluT> attr = std::make_unique<schema::PreluT>();
|
||||
if (attr == nullptr) {
|
||||
MS_LOG(ERROR) << "new op failed";
|
||||
return RET_NULL_PTR;
|
||||
}
|
||||
|
||||
if (GetTfliteData(tflite_op->inputs[1], tflite_tensors, tflite_model_buffer, attr->slope)) {
|
||||
MS_LOG(ERROR) << "get pRelu -> slope failed";
|
||||
|
@ -105,43 +123,6 @@ STATUS TflitePreluParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite
|
|||
return RET_OK;
|
||||
}
|
||||
|
||||
STATUS TfliteLeakyReluParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op,
|
||||
const std::vector<std::unique_ptr<tflite::TensorT>> &tflite_tensors,
|
||||
const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer,
|
||||
schema::CNodeT *op,
|
||||
std::vector<int32_t> *tensors_id,
|
||||
std::vector<schema::Format> *tensors_format,
|
||||
std::map<int, int> *tensors_id_map) {
|
||||
MS_LOG(DEBUG) << "parse TfliteLeakyReluParser";
|
||||
|
||||
if (op == nullptr) {
|
||||
MS_LOG(ERROR) << "op is null";
|
||||
return RET_NULL_PTR;
|
||||
}
|
||||
op->primitive = std::make_unique<schema::PrimitiveT>();
|
||||
if (op->primitive == nullptr) {
|
||||
MS_LOG(ERROR) << "op->primitive is null";
|
||||
return RET_NULL_PTR;
|
||||
}
|
||||
|
||||
std::unique_ptr<schema::LeakyReLUT> attr = std::make_unique<schema::LeakyReLUT>();
|
||||
|
||||
const auto &tflite_attr = tflite_op->builtin_options.AsLeakyReluOptions();
|
||||
if (tflite_attr == nullptr) {
|
||||
MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
|
||||
return RET_NULL_PTR;
|
||||
}
|
||||
attr->negativeSlope = tflite_attr->alpha;
|
||||
|
||||
op->primitive->value.type = schema::PrimitiveType_LeakyReLU;
|
||||
op->primitive->value.value = attr.release();
|
||||
|
||||
AddOpInput(op, tensors_id, tensors_format, tensors_id_map,
|
||||
tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC);
|
||||
AddOpOutput(op, tensors_id, tensors_format, tensors_id_map,
|
||||
tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC);
|
||||
return RET_OK;
|
||||
}
|
||||
|
||||
TfliteNodeRegister g_TfliteReluParser("Relu", new TfliteReluParser());
|
||||
TfliteNodeRegister g_TfliteRelu6Parser("Relu6", new TfliteRelu6Parser());
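With this change a single TfliteActivationParser covers Relu, Relu6, HardSwish and LeakyRelu by splitting the op name and branching on the resulting segment, reading alpha from LeakyReluOptions when present. A simplified standalone sketch of that name-based dispatch; the Split helper, the enum, and the choice of name segment are illustrative assumptions, not the converter's exact code:

#include <iostream>
#include <string>
#include <vector>

enum class ActivationType { RELU, RELU6, LEAKY_RELU, UNKNOWN };

// Stand-in for the converter's Split utility: split "block1-LeakyRelu" on '-'.
static std::vector<std::string> Split(const std::string &s, char sep) {
  std::vector<std::string> parts;
  std::string cur;
  for (char c : s) {
    if (c == sep) {
      parts.push_back(cur);
      cur.clear();
    } else {
      cur += c;
    }
  }
  parts.push_back(cur);
  return parts;
}

static ActivationType DispatchByName(const std::string &op_name) {
  // Assume the activation kind is the last '-'-separated segment of the op name.
  const std::string node_name = Split(op_name, '-').back();
  if (node_name == "Relu") return ActivationType::RELU;
  if (node_name == "Relu6") return ActivationType::RELU6;
  if (node_name == "LeakyRelu") return ActivationType::LEAKY_RELU;
  return ActivationType::UNKNOWN;
}

int main() {
  std::cout << (DispatchByName("block1-LeakyRelu") == ActivationType::LEAKY_RELU) << std::endl;  // 1
  return 0;
}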
@@ -63,6 +63,11 @@ class TfliteHardSwishParser : public TfliteActivationParser {
TfliteHardSwishParser() : TfliteActivationParser() {}
};

class TfliteLeakyReluParser : public TfliteActivationParser {
public:
TfliteLeakyReluParser() : TfliteActivationParser() {}
};

class TflitePreluParser : public TfliteNodeParser {
public:
TflitePreluParser() : TfliteNodeParser("Prelu") {}
@@ -75,19 +80,6 @@ class TflitePreluParser : public TfliteNodeParser {
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) override;
};

class TfliteLeakyReluParser : public TfliteNodeParser {
public:
TfliteLeakyReluParser() : TfliteNodeParser("LeakyRelu") {}

STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op,
const std::vector<std::unique_ptr<tflite::TensorT>> &tflite_tensors,
const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer,
schema::CNodeT *op,
std::vector<int32_t> *tensors_id,
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) override;
};
} // namespace lite
} // namespace mindspore
@@ -30,8 +30,6 @@ STATUS TfliteAddNParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) {
MS_LOG(DEBUG) << "parse TfliteAddNParser";

// set attr
if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
@@ -43,12 +41,15 @@ STATUS TfliteAddNParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_
}

std::unique_ptr<schema::AddNT> attr = std::make_unique<schema::AddNT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

attr->N = tflite_tensors.size() - 1;
op->primitive->value.type = schema::PrimitiveType_AddN;
op->primitive->value.value = attr.release();

// set input
for (size_t i = 0; i < tflite_op->inputs.size(); i++) {
AddOpInput(op, tensors_id, tensors_format, tensors_id_map,
tflite_op->inputs[i], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC);
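One caveat about the if (attr == nullptr) guards these hunks add after std::make_unique: with standard semantics make_unique reports allocation failure by throwing std::bad_alloc and never returns null, so the branch is effectively unreachable unless the toolchain disables exceptions or replaces operator new. The short sketch below shows the variant where such a check is meaningful (nothrow new); it is an illustration, not a change the patch makes.

#include <iostream>
#include <memory>
#include <new>

struct AddNAttrT { int N = 0; };  // stand-in for schema::AddNT

int main() {
  // make_unique: failure throws std::bad_alloc, it never yields nullptr.
  auto a = std::make_unique<AddNAttrT>();
  a->N = 2;

  // nothrow new: failure yields nullptr, so this check can actually fire.
  std::unique_ptr<AddNAttrT> b(new (std::nothrow) AddNAttrT());
  if (b == nullptr) {
    std::cerr << "new op failed\n";
    return 1;
  }
  b->N = 3;
  std::cout << "N = " << b->N << "\n";
  return 0;
}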
@@ -29,7 +29,6 @@ STATUS TfliteArgmaxParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflit
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) {
MS_LOG(DEBUG) << "parse TfliteArgmaxParser";

if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
@@ -41,6 +40,10 @@ STATUS TfliteArgmaxParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflit
}

std::unique_ptr<schema::ArgMaxT> attr = std::make_unique<schema::ArgMaxT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

attr->outMaxValue = false;
attr->topK = 1;

@@ -29,7 +29,6 @@ STATUS TfliteArgminParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflit
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) {
MS_LOG(DEBUG) << "parse TfliteArgminParser";

if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
@@ -41,6 +40,10 @@ STATUS TfliteArgminParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflit
}

std::unique_ptr<schema::ArgMinT> attr = std::make_unique<schema::ArgMinT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

attr->outMaxValue = false;
attr->topK = 1;
@@ -45,6 +45,10 @@ STATUS TfliteDoubleInputOpParser::Parse(const std::unique_ptr<tflite::OperatorT>
if (std::strcmp(node_name, "Add") == 0) {
MS_LOG(DEBUG) << "parse TfliteAddParser";
std::unique_ptr<schema::AddT> attr = std::make_unique<schema::AddT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
const auto &tfliteAttr = tflite_op->builtin_options.AsAddOptions();
if (nullptr == tfliteAttr) {
MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
@@ -56,6 +60,10 @@ STATUS TfliteDoubleInputOpParser::Parse(const std::unique_ptr<tflite::OperatorT>
} else if (std::strcmp(node_name, "Sub") == 0) {
MS_LOG(DEBUG) << "parse TfliteSubParser";
std::unique_ptr<schema::SubT> attr = std::make_unique<schema::SubT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
const auto &tfliteAttr = tflite_op->builtin_options.AsSubOptions();
if (nullptr == tfliteAttr) {
MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
@@ -67,6 +75,10 @@ STATUS TfliteDoubleInputOpParser::Parse(const std::unique_ptr<tflite::OperatorT>
} else if (std::strcmp(node_name, "Mul") == 0) {
MS_LOG(DEBUG) << "parse TfliteMulParser";
std::unique_ptr<schema::MulT> attr = std::make_unique<schema::MulT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
const auto &tfliteAttr = tflite_op->builtin_options.AsMulOptions();
if (nullptr == tfliteAttr) {
MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
@@ -78,6 +90,10 @@ STATUS TfliteDoubleInputOpParser::Parse(const std::unique_ptr<tflite::OperatorT>
} else if (std::strcmp(node_name, "Div") == 0) {
MS_LOG(DEBUG) << "parse TfliteDivParser";
std::unique_ptr<schema::DivT> attr = std::make_unique<schema::DivT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
const auto &tfliteAttr = tflite_op->builtin_options.AsDivOptions();
if (nullptr == tfliteAttr) {
MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
@@ -89,26 +105,46 @@ STATUS TfliteDoubleInputOpParser::Parse(const std::unique_ptr<tflite::OperatorT>
} else if (std::strcmp(node_name, "FloorDiv") == 0) {
MS_LOG(DEBUG) << "parse TfliteFloorDivParser";
std::unique_ptr<schema::FloorDivT> attr = std::make_unique<schema::FloorDivT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
op->primitive->value.type = schema::PrimitiveType_FloorDiv;
op->primitive->value.value = attr.release();
} else if (std::strcmp(node_name, "FloorMod") == 0) {
MS_LOG(DEBUG) << "parse TfliteFloorModParser";
std::unique_ptr<schema::FloorModT> attr = std::make_unique<schema::FloorModT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
op->primitive->value.type = schema::PrimitiveType_FloorMod;
op->primitive->value.value = attr.release();
} else if (std::strcmp(node_name, "RealDiv") == 0) {
MS_LOG(DEBUG) << "parse TfliteRealDivParser";
std::unique_ptr<schema::RealDivT> attr = std::make_unique<schema::RealDivT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
op->primitive->value.type = schema::PrimitiveType_Div;
op->primitive->value.value = attr.release();
} else if (std::strcmp(node_name, "SquaredDifference") == 0) {
MS_LOG(DEBUG) << "parse TfliteSquaredDifferenceParser";
std::unique_ptr<schema::SquaredDifferenceT> attr = std::make_unique<schema::SquaredDifferenceT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
op->primitive->value.type = schema::PrimitiveType_SquaredDifference;
op->primitive->value.value = attr.release();
} else if (std::strcmp(node_name, "Pow") == 0) {
MS_LOG(DEBUG) << "parse TflitePowParser";
std::unique_ptr<schema::PowerT> attr = std::make_unique<schema::PowerT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
attr->power = 0.0f;
attr->scale = 1.0f;
attr->shift = 0.0f;
@@ -117,11 +153,19 @@ STATUS TfliteDoubleInputOpParser::Parse(const std::unique_ptr<tflite::OperatorT>
} else if (std::strcmp(node_name, "Maximum") == 0) {
MS_LOG(DEBUG) << "parse TfliteMaximumParser";
std::unique_ptr<schema::MaximumT> attr = std::make_unique<schema::MaximumT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
op->primitive->value.type = schema::PrimitiveType_Maximum;
op->primitive->value.value = attr.release();
} else if (std::strcmp(node_name, "Minimum") == 0) {
MS_LOG(DEBUG) << "parse TfliteMinimumParser";
std::unique_ptr<schema::MinimumT> attr = std::make_unique<schema::MinimumT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
op->primitive->value.type = schema::PrimitiveType_Minimum;
op->primitive->value.value = attr.release();
}
@@ -159,56 +203,100 @@ STATUS TfliteSingleInputOpParser::Parse(const std::unique_ptr<tflite::OperatorT>
if (std::strcmp(node_name, "Abs") == 0) {
MS_LOG(DEBUG) << "parse TfliteAbsParser";
std::unique_ptr<schema::AbsT> attr = std::make_unique<schema::AbsT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
op->primitive->value.type = schema::PrimitiveType_Abs;
op->primitive->value.value = attr.release();
} else if (std::strcmp(node_name, "Exp") == 0) {
MS_LOG(DEBUG) << "parse TfliteExpParser";
std::unique_ptr<schema::ExpT> attr = std::make_unique<schema::ExpT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
op->primitive->value.type = schema::PrimitiveType_Exp;
op->primitive->value.value = attr.release();
} else if (std::strcmp(node_name, "Sqrt") == 0) {
MS_LOG(DEBUG) << "parse TfliteSqrtParser";
std::unique_ptr<schema::SqrtT> attr = std::make_unique<schema::SqrtT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
op->primitive->value.type = schema::PrimitiveType_Sqrt;
op->primitive->value.value = attr.release();
} else if (std::strcmp(node_name, "Rsqrt") == 0) {
MS_LOG(DEBUG) << "parse TfliteRsqrtParser";
std::unique_ptr<schema::RsqrtT> attr = std::make_unique<schema::RsqrtT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
op->primitive->value.type = schema::PrimitiveType_Rsqrt;
op->primitive->value.value = attr.release();
} else if (std::strcmp(node_name, "Square") == 0) {
MS_LOG(DEBUG) << "parse TfliteSquareParser";
std::unique_ptr<schema::SquareT> attr = std::make_unique<schema::SquareT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
op->primitive->value.type = schema::PrimitiveType_Square;
op->primitive->value.value = attr.release();
} else if (std::strcmp(node_name, "Sin") == 0) {
MS_LOG(DEBUG) << "parse TfliteSinParser";
std::unique_ptr<schema::SinT> attr = std::make_unique<schema::SinT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
op->primitive->value.type = schema::PrimitiveType_Sin;
op->primitive->value.value = attr.release();
} else if (std::strcmp(node_name, "Cos") == 0) {
MS_LOG(DEBUG) << "parse TfliteCosParser";
std::unique_ptr<schema::CosT> attr = std::make_unique<schema::CosT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
op->primitive->value.type = schema::PrimitiveType_Cos;
op->primitive->value.value = attr.release();
} else if (std::strcmp(node_name, "Log") == 0) {
MS_LOG(DEBUG) << "parse TfliteLogParser";
std::unique_ptr<schema::LogT> attr = std::make_unique<schema::LogT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
op->primitive->value.type = schema::PrimitiveType_Log;
op->primitive->value.value = attr.release();
} else if (std::strcmp(node_name, "Round") == 0) {
MS_LOG(DEBUG) << "parse TfliteRoundParser";
std::unique_ptr<schema::RoundT> attr = std::make_unique<schema::RoundT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
op->primitive->value.type = schema::PrimitiveType_Round;
op->primitive->value.value = attr.release();
} else if (std::strcmp(node_name, "Ceil") == 0) {
MS_LOG(DEBUG) << "parse TfliteCeilParser";
std::unique_ptr<schema::CeilT> attr = std::make_unique<schema::CeilT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
op->primitive->value.type = schema::PrimitiveType_Ceil;
op->primitive->value.value = attr.release();
} else if (std::strcmp(node_name, "flOOR") == 0) {
MS_LOG(DEBUG) << "parse TfliteFloorParser";
std::unique_ptr<schema::FloorT> attr = std::make_unique<schema::FloorT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
op->primitive->value.type = schema::PrimitiveType_Floor;
op->primitive->value.value = attr.release();
}
@@ -243,31 +331,55 @@ STATUS TfliteCompareOpParser::Parse(const std::unique_ptr<tflite::OperatorT> &tf
if (std::strcmp(node_name, "Equal") == 0) {
MS_LOG(DEBUG) << "parse TfliteEqualParser";
std::unique_ptr<schema::EqualT> attr = std::make_unique<schema::EqualT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
op->primitive->value.type = schema::PrimitiveType_Equal;
op->primitive->value.value = attr.release();
} else if (std::strcmp(node_name, "NotEqual") == 0) {
MS_LOG(DEBUG) << "parse TfliteNotEqualParser";
std::unique_ptr<schema::NotEqualT> attr = std::make_unique<schema::NotEqualT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
op->primitive->value.type = schema::PrimitiveType_NotEqual;
op->primitive->value.value = attr.release();
} else if (std::strcmp(node_name, "Greater") == 0) {
MS_LOG(DEBUG) << "parse TfliteGreaterParser";
std::unique_ptr<schema::GreaterT> attr = std::make_unique<schema::GreaterT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
op->primitive->value.type = schema::PrimitiveType_Greater;
op->primitive->value.value = attr.release();
} else if (std::strcmp(node_name, "GreaterEqual") == 0) {
MS_LOG(DEBUG) << "parse TfliteGreaterEqualParser";
std::unique_ptr<schema::GreaterEqualT> attr = std::make_unique<schema::GreaterEqualT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
op->primitive->value.type = schema::PrimitiveType_GreaterEqual;
op->primitive->value.value = attr.release();
} else if (std::strcmp(node_name, "Less") == 0) {
MS_LOG(DEBUG) << "parse TfliteLessParser";
std::unique_ptr<schema::LessT> attr = std::make_unique<schema::LessT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
op->primitive->value.type = schema::PrimitiveType_Less;
op->primitive->value.value = attr.release();
} else if (std::strcmp(node_name, "LessEqual") == 0) {
MS_LOG(DEBUG) << "parse TfliteLessEqualParser";
std::unique_ptr<schema::LessEqualT> attr = std::make_unique<schema::LessEqualT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
op->primitive->value.type = schema::PrimitiveType_LessEqual;
op->primitive->value.value = attr.release();
}
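The arithmetic, single-input and comparison hunks repeat the same few lines for every op name: allocate the attribute, check it, set the primitive type, release. If that duplication ever becomes a maintenance burden, it could be collapsed into one small helper template; the sketch below is only an illustration with stand-in types and constants, not a refactor this patch performs.

#include <iostream>
#include <memory>

struct PrimitiveValueT { int type = 0; void *value = nullptr; };
struct PrimitiveT { PrimitiveValueT value; };

enum Status { RET_OK = 0, RET_NULL_PTR = 1 };

// Stand-in attribute types, one per op.
struct EqualAttrT {};
struct LessAttrT {};

// One helper replaces the repeated allocate/check/assign/release block.
template <typename AttrT>
Status SetPrimitive(PrimitiveT *primitive, int primitive_type) {
  if (primitive == nullptr) {
    std::cerr << "primitive is null\n";
    return RET_NULL_PTR;
  }
  auto attr = std::make_unique<AttrT>();
  primitive->value.type = primitive_type;
  primitive->value.value = attr.release();  // caller's schema object takes ownership
  return RET_OK;
}

int main() {
  PrimitiveT prim;
  // The caller would dispatch on the node name and pick the attribute type once.
  Status s = SetPrimitive<EqualAttrT>(&prim, /*hypothetical PrimitiveType_Equal=*/21);
  delete static_cast<EqualAttrT *>(prim.value.value);  // sketch-only cleanup
  return s == RET_OK ? 0 : 1;
}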
@@ -50,6 +50,10 @@ STATUS TfliteBatchToSpaceParser::Parse(const std::unique_ptr<tflite::OperatorT>
}

std::unique_ptr<schema::BatchToSpaceT> attr = std::make_unique<schema::BatchToSpaceT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

if (GetTfliteData(tflite_op->inputs[1], tflite_tensors, tflite_model_buffer, attr->blockShape)) {
MS_LOG(ERROR) << "get batchToSpace -> blockShape failed";

@@ -30,7 +30,6 @@ STATUS TfliteBroadcastToParser::Parse(const std::unique_ptr<tflite::OperatorT> &
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) {
MS_LOG(DEBUG) << "parse TfliteBroadcastToParser";

if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
@@ -42,6 +41,10 @@ STATUS TfliteBroadcastToParser::Parse(const std::unique_ptr<tflite::OperatorT> &
}

std::unique_ptr<schema::BroadcastToT> attr = std::make_unique<schema::BroadcastToT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

if (GetTfliteData(tflite_op->inputs[1], tflite_tensors, tflite_model_buffer, attr->dst_shape)) {
MS_LOG(ERROR) << "get broadCastTo -> dst_shape failed";
@@ -29,7 +29,6 @@ STATUS TfliteCastParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) {
MS_LOG(DEBUG) << "parse TfliteCastParser";

if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
@@ -41,6 +40,10 @@ STATUS TfliteCastParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_
}

std::unique_ptr<schema::CastT> attr = std::make_unique<schema::CastT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

const auto &in_tensor = tflite_tensors[tflite_op->inputs[0]];
if (in_tensor == nullptr) {

@@ -29,8 +29,6 @@ STATUS TfliteConcatParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflit
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) {
MS_LOG(DEBUG) << "parse TfliteConcatParser";

// set attr
if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
@@ -42,6 +40,10 @@ STATUS TfliteConcatParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflit
}

std::unique_ptr<schema::ConcatT> attr = std::make_unique<schema::ConcatT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

const auto &tfliteAttr = tflite_op->builtin_options.AsConcatenationOptions();
if (tfliteAttr == nullptr) {
@@ -29,7 +29,6 @@ STATUS TfliteConvParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) {
MS_LOG(DEBUG) << "parse TfliteConvParser";

if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
@@ -41,6 +40,11 @@ STATUS TfliteConvParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_
}

std::unique_ptr<schema::Conv2DT> attr = std::make_unique<schema::Conv2DT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

const auto &tflite_attr = tflite_op->builtin_options.AsConv2DOptions();
if (tflite_attr == nullptr) {
MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";

@@ -29,7 +29,6 @@ STATUS TfliteDeConvParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflit
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) {
MS_LOG(DEBUG) << "parse tflite Transpose_Conv parser";

if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
@@ -41,6 +40,11 @@ STATUS TfliteDeConvParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflit
}

std::unique_ptr<schema::DeConv2DT> attr = std::make_unique<schema::DeConv2DT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

const auto &tflite_attr = tflite_op->builtin_options.AsTransposeConvOptions();
if (tflite_attr == nullptr) {
MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
@@ -42,6 +42,10 @@ STATUS TfliteDepthToSpaceParser::Parse(const std::unique_ptr<tflite::OperatorT>
}

std::unique_ptr<schema::DepthToSpaceT> attr = std::make_unique<schema::DepthToSpaceT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

const auto &tflite_attr = tflite_op->builtin_options.AsDepthToSpaceOptions();
if (tflite_attr == nullptr) {

@@ -29,7 +29,6 @@ STATUS TfliteDepthwiseConv2DParser::Parse(const std::unique_ptr<tflite::Operator
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) {
MS_LOG(DEBUG) << "parse TfliteDepthwiseConv2DParser";

if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
@@ -41,6 +40,11 @@ STATUS TfliteDepthwiseConv2DParser::Parse(const std::unique_ptr<tflite::Operator
}

std::unique_ptr<schema::DepthwiseConv2DT> attr = std::make_unique<schema::DepthwiseConv2DT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

const auto &tflite_attr = tflite_op->builtin_options.AsDepthwiseConv2DOptions();
if (tflite_attr == nullptr) {
MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
@@ -28,7 +28,6 @@ STATUS TfliteDequantizeParser::Parse(const std::unique_ptr<tflite::OperatorT> &t
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) {
MS_LOG(DEBUG) << "parse TfliteDequantizeNParser";

if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
@@ -40,8 +39,11 @@ STATUS TfliteDequantizeParser::Parse(const std::unique_ptr<tflite::OperatorT> &t
}

std::unique_ptr<schema::CastT> attr = std::make_unique<schema::CastT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

// get the dequantize input tensor
const auto &in_tensor = tflite_tensors[tflite_op->inputs[0]];
if (in_tensor == nullptr) {
MS_LOG(ERROR) << "input tensor is null";
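This hunk keeps modelling TFLite's Dequantize as a Cast (note the schema::CastT attribute) and reads the input tensor to decide the source type. A rough, self-contained sketch of that idea follows; the TensorType enum and MakeDequantizeCast helper are invented stand-ins for illustration, not the converter's real definitions.

#include <iostream>

enum class TensorType { kUint8, kInt8, kFloat32 };

struct CastAttr { TensorType src; TensorType dst; };

// Dequantize becomes a cast from the quantized storage type to float.
CastAttr MakeDequantizeCast(TensorType input_type) {
  CastAttr attr;
  attr.src = input_type;
  attr.dst = TensorType::kFloat32;
  return attr;
}

int main() {
  const CastAttr attr = MakeDequantizeCast(TensorType::kUint8);
  std::cout << "dst is float32: " << (attr.dst == TensorType::kFloat32) << "\n";
  return 0;
}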
@@ -29,7 +29,6 @@ STATUS TfliteExpandDimsParser::Parse(const std::unique_ptr<tflite::OperatorT> &t
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) {
MS_LOG(DEBUG) << "parse TfliteExpandDimsParser";

if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
@@ -41,6 +40,10 @@ STATUS TfliteExpandDimsParser::Parse(const std::unique_ptr<tflite::OperatorT> &t
}

std::unique_ptr<schema::ExpandDimsT> attr = std::make_unique<schema::ExpandDimsT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

const auto &tflite_attr = tflite_op->builtin_options.AsExpandDimsOptions();
if (tflite_attr == nullptr) {
@@ -52,15 +55,6 @@ STATUS TfliteExpandDimsParser::Parse(const std::unique_ptr<tflite::OperatorT> &t

MS_LOG(ERROR) << "The attr dim is folded by TFLite.";
return RET_ERROR;

/*
if (op != nullptr) {
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.type = schema::PrimitiveType_ExpandDims;
op->primitive->value.value = attr.release();
}
return RET_OK;
*/
}

TfliteNodeRegister g_tfliteExpandDimsParser("ExpandDims", new TfliteExpandDimsParser());
@@ -29,7 +29,6 @@ STATUS TfliteFillParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) {
MS_LOG(DEBUG) << "parse TfliteFillParser";

if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
@@ -41,6 +40,10 @@ STATUS TfliteFillParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_
}

std::unique_ptr<schema::FillT> attr = std::make_unique<schema::FillT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

if (tflite_op->inputs.size() > 1) {
if (GetTfliteData(tflite_op->inputs[1], tflite_tensors, tflite_model_buffer, attr->dims)) {
@@ -18,7 +18,6 @@
#include <vector>
#include <memory>
#include <map>
#include <string>

namespace mindspore {
namespace lite {
@@ -29,6 +28,7 @@ STATUS TfliteFullyConnectedParser::Parse(const std::unique_ptr<tflite::OperatorT
std::vector<int32_t> *tensors_id,
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) {
MS_LOG(DEBUG) << "parse TfliteFullyConnectedParser";
if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
@@ -39,15 +39,11 @@ STATUS TfliteFullyConnectedParser::Parse(const std::unique_ptr<tflite::OperatorT
return RET_NULL_PTR;
}

std::vector<std::string> node_name_str;
Split(op->name, &node_name_str, "-");
const char *node_name = node_name_str.data()->c_str();
if (std::strcmp(node_name, "FullyConnected") == 0) {
MS_LOG(DEBUG) << "parse TfliteFullyConnectedParser";
} else if (std::strcmp(node_name, "FakeQuant") == 0) {
MS_LOG(DEBUG) << "parse TfliteFakeQuantParser";
}
std::unique_ptr<schema::FullConnectionT> attr = std::make_unique<schema::FullConnectionT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

const auto &tflite_attr = tflite_op->builtin_options.AsFullyConnectedOptions();
if (tflite_attr == nullptr) {
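The fully-connected hunk shows the shared dispatch idiom used by the multi-op parsers: the registered node name is the prefix of op->name before the first "-", recovered with Split(op->name, &node_name_str, "-") and compared with strcmp. A small stand-alone sketch of that name-splitting step follows; it assumes names of the form "FullyConnected-3" and does not use the converter's real Split helper.

#include <cstring>
#include <iostream>
#include <string>

// Returns the part of the node name before the first '-'.
std::string NodeTypeFromName(const std::string &op_name) {
  const std::string::size_type dash = op_name.find('-');
  return dash == std::string::npos ? op_name : op_name.substr(0, dash);
}

int main() {
  const std::string type = NodeTypeFromName("FullyConnected-3");  // hypothetical op name
  if (std::strcmp(type.c_str(), "FullyConnected") == 0) {
    std::cout << "dispatch to the FullyConnected branch\n";
  } else if (std::strcmp(type.c_str(), "FakeQuant") == 0) {
    std::cout << "dispatch to the FakeQuant branch\n";
  }
  return 0;
}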
@@ -29,7 +29,6 @@ STATUS TfliteGatherNdParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfl
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) {
MS_LOG(DEBUG) << "parse TfliteGatherNdParser";

if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
@@ -41,6 +40,11 @@ STATUS TfliteGatherNdParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfl
}

std::unique_ptr<schema::GatherNdT> attr = std::make_unique<schema::GatherNdT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

attr->batchDims = 0;

op->primitive->value.type = schema::PrimitiveType_GatherNd;

@@ -29,8 +29,6 @@ STATUS TfliteGatherParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflit
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) {
MS_LOG(DEBUG) << "parse TfliteGatherParser";

// set attr
if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
@@ -42,6 +40,10 @@ STATUS TfliteGatherParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflit
}

std::unique_ptr<schema::GatherT> attr = std::make_unique<schema::GatherT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

const auto &tflite_attr = tflite_op->builtin_options.AsGatherOptions();
if (tflite_attr == nullptr) {
@@ -30,8 +30,6 @@ STATUS TfliteL2NormParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflit
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) {
MS_LOG(DEBUG) << "parse TfliteL2NormParser";

// set attr
if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
@@ -43,6 +41,11 @@ STATUS TfliteL2NormParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflit
}

std::unique_ptr<schema::L2NormT> attr = std::make_unique<schema::L2NormT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

if (tflite_op->inputs.empty()) {
MS_LOG(ERROR) << "the input is null";
return RET_NULL_PTR;
@@ -45,16 +45,28 @@ STATUS TfliteLogicalParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfli
if (std::strcmp(node_name, "LogicalAnd") == 0) {
MS_LOG(DEBUG) << "parse TfliteLogicalAndParser";
std::unique_ptr<schema::LogicalAndT> attr = std::make_unique<schema::LogicalAndT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
op->primitive->value.type = schema::PrimitiveType_LogicalAnd;
op->primitive->value.value = attr.release();
} else if (std::strcmp(node_name, "LogicalNot") == 0) {
MS_LOG(DEBUG) << "parse TfliteLogicalNotParser";
std::unique_ptr<schema::LogicalNotT> attr = std::make_unique<schema::LogicalNotT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
op->primitive->value.type = schema::PrimitiveType_LogicalNot;
op->primitive->value.value = attr.release();
} else if (std::strcmp(node_name, "LogicalOr") == 0) {
MS_LOG(DEBUG) << "parse TfliteLogicalOrParser";
std::unique_ptr<schema::LogicalOrT> attr = std::make_unique<schema::LogicalOrT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}
op->primitive->value.type = schema::PrimitiveType_LogicalOr;
op->primitive->value.value = attr.release();
}
@@ -29,8 +29,6 @@ STATUS TfliteLRNParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_o
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) {
MS_LOG(DEBUG) << "parse TfliteLRNParser";

// set attr
if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
@@ -42,6 +40,10 @@ STATUS TfliteLRNParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_o
}

std::unique_ptr<schema::LocalResponseNormalizationT> attr = std::make_unique<schema::LocalResponseNormalizationT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

const auto &tflite_attr = tflite_op->builtin_options.AsLocalResponseNormalizationOptions();
if (tflite_attr == nullptr) {
@@ -110,7 +110,6 @@ STATUS TfliteModelParser::ConvertOp(const std::unique_ptr<tflite::ModelT> &tflit
op->quantType = quant_type;
MS_LOG(INFO) << "parse op: " << op->name.c_str();

// parse tflite op
auto node_parser = TfliteNodeParserRegistry::GetInstance()->GetNodeParser(op_type);
if (node_parser == nullptr) {
MS_LOG(ERROR) << "cannot find node parser, opType: " << op_type.c_str();
@@ -122,7 +121,6 @@ STATUS TfliteModelParser::ConvertOp(const std::unique_ptr<tflite::ModelT> &tflit
return RET_ERROR;
}

// add
sub_graph->nodes.emplace_back(op.release());
opMap[sub_graph->nodes.back()->name] = sub_graph->nodes.back().get();
tfliteOpMap[tflite_op.get()] = sub_graph->nodes.back().get();
@@ -303,7 +301,6 @@ MetaGraphT *TfliteModelParser::Parse(const std::string &model_file,
sub_graph->name = "MS_model converted by TF-Lite";

// load graph
// std::unique_ptr<tflite::ModelT> tflite_model(new tflite::ModelT());
std::unique_ptr<tflite::ModelT> tflite_model = ReadTfliteModel(model_file.c_str());

if (tflite_model->subgraphs.size() != 1) {
@@ -20,7 +20,14 @@ namespace mindspore {
namespace lite {
TfliteNodeParserRegistry::TfliteNodeParserRegistry() {}

TfliteNodeParserRegistry::~TfliteNodeParserRegistry() {}
TfliteNodeParserRegistry::~TfliteNodeParserRegistry() {
for (auto ite : parsers) {
if (ite.second != nullptr) {
delete ite.second;
ite.second = nullptr;
}
}
}

TfliteNodeParserRegistry *TfliteNodeParserRegistry::GetInstance() {
static TfliteNodeParserRegistry instance;
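The registry hunk replaces the previously empty ~TfliteNodeParserRegistry() with a destructor that walks parsers and deletes every registered parser, which matters because registration hands in raw new-ed objects (see the TfliteNodeRegister g_TfliteReluParser("Relu", new TfliteReluParser()) globals earlier in the diff). A condensed, self-contained model of that ownership scheme is below; the class and member names are simplified stand-ins, not the project's real interface.

#include <iostream>
#include <map>
#include <string>

struct NodeParser {
  virtual ~NodeParser() = default;
  virtual void Parse() const = 0;
};

struct ReluParser : NodeParser {
  void Parse() const override { std::cout << "parse Relu\n"; }
};

class ParserRegistry {
 public:
  static ParserRegistry *GetInstance() {
    static ParserRegistry instance;
    return &instance;
  }
  // The registry takes ownership of the raw pointer handed in at registration.
  void Register(const std::string &name, NodeParser *parser) { parsers_[name] = parser; }
  NodeParser *Get(const std::string &name) {
    auto it = parsers_.find(name);
    return it == parsers_.end() ? nullptr : it->second;
  }
  ~ParserRegistry() {
    // Mirrors the added destructor: free everything that was registered.
    for (auto &entry : parsers_) {
      delete entry.second;
      entry.second = nullptr;
    }
  }

 private:
  std::map<std::string, NodeParser *> parsers_;
};

int main() {
  ParserRegistry::GetInstance()->Register("Relu", new ReluParser());
  if (NodeParser *p = ParserRegistry::GetInstance()->Get("Relu")) p->Parse();
  return 0;  // the singleton's destructor releases the parsers at exit
}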
@@ -29,7 +29,6 @@ STATUS TfliteOneHotParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflit
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) {
MS_LOG(DEBUG) << "parse TfliteOneHotParser";

if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
@@ -41,6 +40,10 @@ STATUS TfliteOneHotParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflit
}

std::unique_ptr<schema::OneHotT> attr = std::make_unique<schema::OneHotT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

const auto &tflite_attr = tflite_op->builtin_options.AsOneHotOptions();
if (tflite_attr == nullptr) {

@@ -29,7 +29,6 @@ STATUS TflitePadParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_o
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) {
MS_LOG(DEBUG) << "parse TflitePadParser";

if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
@@ -41,6 +40,10 @@ STATUS TflitePadParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_o
}

std::unique_ptr<schema::PadT> attr = std::make_unique<schema::PadT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

const auto &tflite_attr = tflite_op->builtin_options.AsPadOptions();
if (tflite_attr == nullptr) {
@@ -40,6 +40,10 @@ STATUS TflitePoolingParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfli
}

std::unique_ptr<schema::PoolingT> attr = std::make_unique<schema::PoolingT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

std::vector<std::string> node_name_str;
Split(op->name, &node_name_str, "-");

@@ -29,7 +29,6 @@ STATUS TfliteRangeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) {
MS_LOG(DEBUG) << "parse TfliteRangeParser";

if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
@@ -41,6 +40,10 @@ STATUS TfliteRangeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite
}

std::unique_ptr<schema::RangeT> attr = std::make_unique<schema::RangeT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

attr->dType = 0;
// attr->start
@@ -29,7 +29,6 @@ STATUS TfliteRankParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) {
MS_LOG(DEBUG) << "parse TfliteRankParser";

if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
@@ -41,6 +40,10 @@ STATUS TfliteRankParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_
}

std::unique_ptr<schema::RankT> attr = std::make_unique<schema::RankT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

op->primitive->value.type = schema::PrimitiveType_Rank;
op->primitive->value.value = attr.release();

@@ -29,7 +29,6 @@ STATUS TfliteReduceParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflit
std::vector<int32_t> *tensors_id,
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) {
// set attr
if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
@@ -41,7 +40,10 @@ STATUS TfliteReduceParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflit
}

std::unique_ptr<schema::ReduceT> attr = std::make_unique<schema::ReduceT>();
// auto tflite_tensors = tflite_subgraph->tensors;
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

const auto &tflite_attr = tflite_op->builtin_options.AsReducerOptions();
if (tflite_attr == nullptr) {
@@ -29,7 +29,6 @@ STATUS TfliteReshapeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfli
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) {
MS_LOG(DEBUG) << "parse TfliteReshapeParser";

if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
@@ -41,6 +40,10 @@ STATUS TfliteReshapeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfli
}

std::unique_ptr<schema::ReshapeT> attr = std::make_unique<schema::ReshapeT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

const auto &tfliteAttr = tflite_op->builtin_options.AsReshapeOptions();
if (tfliteAttr == nullptr) {

@@ -40,6 +40,10 @@ STATUS TfliteResizeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflit
}

std::unique_ptr<schema::ResizeT> attr = std::make_unique<schema::ResizeT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

std::vector<std::string> node_name_str;
Split(op->name.data(), &node_name_str, "-");
@@ -29,7 +29,6 @@ STATUS TfliteReverseParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfli
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) {
MS_LOG(DEBUG) << "parse TfliteReverseParser";

if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
@@ -41,6 +40,10 @@ STATUS TfliteReverseParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfli
}

std::unique_ptr<schema::ReverseT> attr = std::make_unique<schema::ReverseT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

if (GetTfliteData(tflite_op->inputs[1], tflite_tensors, tflite_model_buffer, attr->axis)) {
MS_LOG(ERROR) << "get reverse -> axis failed";

@@ -30,8 +30,6 @@ STATUS TfliteReverseSequenceParser::Parse(const std::unique_ptr<tflite::Operator
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) {
MS_LOG(DEBUG) << "parse TfliteReverseSequenceParser";

// set attr
if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
@@ -43,6 +41,10 @@ STATUS TfliteReverseSequenceParser::Parse(const std::unique_ptr<tflite::Operator
}

std::unique_ptr<schema::ReverseSequenceT> attr = std::make_unique<schema::ReverseSequenceT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

const auto &tflite_attr = tflite_op->builtin_options.AsReverseSequenceOptions();
if (tflite_attr == nullptr) {
@@ -30,7 +30,6 @@ STATUS TfliteScatterNdParser::Parse(const std::unique_ptr<tflite::OperatorT> &tf
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) {
MS_LOG(DEBUG) << "parse TfliteScatterNdParser";

if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
@@ -42,6 +41,10 @@ STATUS TfliteScatterNdParser::Parse(const std::unique_ptr<tflite::OperatorT> &tf
}

std::unique_ptr<schema::ScatterNDT> attr = std::make_unique<schema::ScatterNDT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

const auto &tflite_attr = tflite_op->builtin_options.AsScatterNdOptions();
if (tflite_attr == nullptr) {

@@ -29,7 +29,6 @@ STATUS TfliteShapeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) {
MS_LOG(DEBUG) << "parse TfliteShapeParser";

if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
@@ -41,6 +40,10 @@ STATUS TfliteShapeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite
}

std::unique_ptr<schema::ShapeT> attr = std::make_unique<schema::ShapeT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

op->primitive->value.type = schema::PrimitiveType_Shape;
op->primitive->value.value = attr.release();
@@ -29,8 +29,6 @@ STATUS TfliteSliceParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) {
MS_LOG(DEBUG) << "parse TfliteSliceParser";

// set attr
if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
@@ -42,6 +40,10 @@ STATUS TfliteSliceParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite
}

std::unique_ptr<schema::SliceT> attr = std::make_unique<schema::SliceT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

attr->format = schema::Format_NHWC;

@@ -29,7 +29,6 @@ STATUS TfliteSoftmaxParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfli
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) {
MS_LOG(DEBUG) << "parse TfliteSoftmaxParser";

if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
@@ -41,6 +40,10 @@ STATUS TfliteSoftmaxParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfli
}

std::unique_ptr<schema::SoftMaxT> attr = std::make_unique<schema::SoftMaxT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

attr->axis = -1;
@@ -30,7 +30,6 @@ STATUS TfliteSpaceToBatchNDParser::Parse(const std::unique_ptr<tflite::OperatorT
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) {
MS_LOG(DEBUG) << "parse TfliteSpaceToBatchNDParser";

if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
@@ -42,6 +41,10 @@ STATUS TfliteSpaceToBatchNDParser::Parse(const std::unique_ptr<tflite::OperatorT
}

std::unique_ptr<schema::SpaceToBatchNDT> attr = std::make_unique<schema::SpaceToBatchNDT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

if (GetTfliteData(tflite_op->inputs[1], tflite_tensors, tflite_model_buffer, attr->blockShape)) {
MS_LOG(ERROR) << "get spaceToBatchND -> blockShape failed";

@@ -30,7 +30,6 @@ STATUS TfliteSpaceToDepthParser::Parse(const std::unique_ptr<tflite::OperatorT>
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) {
MS_LOG(DEBUG) << "parse TfliteSpaceToDepthParser";

if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
@@ -42,6 +41,10 @@ STATUS TfliteSpaceToDepthParser::Parse(const std::unique_ptr<tflite::OperatorT>
}

std::unique_ptr<schema::SpaceToDepthT> attr = std::make_unique<schema::SpaceToDepthT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

const auto &tflite_attr = tflite_op->builtin_options.AsSpaceToDepthOptions();
if (tflite_attr == nullptr) {
@@ -30,7 +30,6 @@ STATUS TfliteSparseToDenseParser::Parse(const std::unique_ptr<tflite::OperatorT>
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) {
MS_LOG(DEBUG) << "parse TfliteSparseToDenseParser";

if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
@@ -42,6 +41,10 @@ STATUS TfliteSparseToDenseParser::Parse(const std::unique_ptr<tflite::OperatorT>
}

std::unique_ptr<schema::SparseToDenseT> attr = std::make_unique<schema::SparseToDenseT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

attr->validateIndices = false;

@@ -29,8 +29,6 @@ STATUS TfliteSplitParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) {
MS_LOG(DEBUG) << "parse TfliteSplitParser";

// set attr
if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
@@ -42,6 +40,10 @@ STATUS TfliteSplitParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite
}

std::unique_ptr<schema::SplitT> attr = std::make_unique<schema::SplitT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

const auto &tflite_attr = tflite_op->builtin_options.AsSplitOptions();
if (tflite_attr == nullptr) {
@@ -28,6 +28,8 @@ STATUS TfliteSplitVParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflit
std::vector<int32_t> *tensors_id,
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) {
MS_LOG(DEBUG) << "parse TfliteSplitVParser";

if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
@@ -38,8 +40,11 @@ STATUS TfliteSplitVParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflit
return RET_NULL_PTR;
}

MS_LOG(DEBUG) << "parse TfliteSplitVParser";
std::unique_ptr<schema::SplitT> attr = std::make_unique<schema::SplitT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

const auto &tflite_attr = tflite_op->builtin_options.AsSplitVOptions();
if (tflite_attr == nullptr) {

@@ -29,7 +29,6 @@ STATUS TfliteSqueezeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfli
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) {
MS_LOG(DEBUG) << "parse TfliteSqueezeParser";

if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
@@ -41,6 +40,10 @@ STATUS TfliteSqueezeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfli
}

std::unique_ptr<schema::SqueezeT> attr = std::make_unique<schema::SqueezeT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

const auto &tflite_attr = tflite_op->builtin_options.AsSqueezeOptions();
if (tflite_attr == nullptr) {
@@ -29,7 +29,6 @@ STATUS TfliteStackParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) {
MS_LOG(DEBUG) << "parse TfliteStackParser";

if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
@@ -41,6 +40,11 @@ STATUS TfliteStackParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite
}

std::unique_ptr<schema::StackT> attr = std::make_unique<schema::StackT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

const auto &tflite_attr = tflite_op->builtin_options.AsPackOptions();
if (tflite_attr == nullptr) {
MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
@@ -28,6 +28,7 @@ STATUS TfliteStridedSliceParser::Parse(const std::unique_ptr<tflite::OperatorT>
std::vector<int32_t> *tensors_id,
std::vector<schema::Format> *tensors_format,
std::map<int, int> *tensors_id_map) {
MS_LOG(DEBUG) << "parse TfliteStridedSliceParser";
if (op == nullptr) {
MS_LOG(ERROR) << "op is null";
return RET_NULL_PTR;
@@ -38,8 +39,12 @@ STATUS TfliteStridedSliceParser::Parse(const std::unique_ptr<tflite::OperatorT>
return RET_NULL_PTR;
}

MS_LOG(DEBUG) << "parse TfliteStridedSliceParser";
std::unique_ptr<schema::StridedSliceT> attr = std::make_unique<schema::StridedSliceT>();
if (attr == nullptr) {
MS_LOG(ERROR) << "new op failed";
return RET_NULL_PTR;
}

const auto &tflite_attr = tflite_op->builtin_options.AsStridedSliceOptions();
if (tflite_attr == nullptr) {
MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
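The original error log in the last hunk mixed printf-style formatting with a stream logger: MS_LOG(ERROR) << "get op: %s attr failed", op->name.c_str(); compiles (the comma operator discards the name) but prints the literal %s. The cleaned line above chains the name through << instead. A tiny stand-alone illustration of the difference, using std::ostringstream in place of the real logger, follows.

#include <iostream>
#include <sstream>
#include <string>

int main() {
  const std::string name = "StridedSlice-7";  // hypothetical op name

  std::ostringstream wrong;
  wrong << "get op: %s attr failed", name.c_str();  // comma operator: name is discarded
  std::cout << wrong.str() << "\n";                 // prints the literal %s

  std::ostringstream right;
  right << "get op: " << name << " attr failed";    // stream the name explicitly
  std::cout << right.str() << "\n";
  return 0;
}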