forked from mindspore-Ecosystem/mindspore

fix bugs

This commit is contained in:
parent c9db2ed81f
commit 8e3fe59f24

@@ -103,9 +103,10 @@ enum EltwiseMode : byte {
 
 enum PadMode : byte {
   NOTSET = 0,
-  SAME = 1,
+  SAME_UPPER = 1,
   VALID = 2,
-  CAFFE = 4
+  CAFFE = 4,
+  SAME_LOWER = 5
 }
 
 enum RoundMode : byte {
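
Note: splitting the old SAME mode into SAME_UPPER and SAME_LOWER mirrors ONNX's auto_pad values, which differ in where the odd padding pixel lands. SAME_LOWER is appended as value 5 after the existing CAFFE = 4 rather than renumbering, presumably so that already-serialized models keep their meaning.
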
@@ -350,7 +350,7 @@ int LiteSession::Init(Context *context) {
     MS_LOG(INFO) << "Init OpenCL runtime.";
   }
 #endif
-  executor = new Executor();
+  executor = new (std::nothrow) Executor();
   if (nullptr == executor) {
     MS_LOG(ERROR) << "New Executor failed";
     is_running_.store(false);
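
Note: plain `new` reports allocation failure by throwing std::bad_alloc, so the `nullptr == executor` check that follows could never fire; `new (std::nothrow)` returns nullptr instead, which makes the check meaningful. A minimal standalone sketch of the difference (not part of the patch):

  #include <new>

  Executor *executor = new (std::nothrow) Executor();  // nullptr on failure instead of a throw
  if (executor == nullptr) {
    // reachable error path; with plain `new` this branch was dead code
  }
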
@@ -156,7 +156,7 @@ void Conv2D::PopulaterConv2DMultiGroup(const Primitive &prim, schema::PrimitiveT
   if (pad_mode == "valid") {
     attr->padMode = schema::PadMode_VALID;
   } else if (pad_mode == "same") {
-    attr->padMode = schema::PadMode_SAME;
+    attr->padMode = schema::PadMode_SAME_UPPER;
   } else {
     attr->padMode = schema::PadMode_NOTSET;
   }
@@ -221,7 +221,7 @@ void Conv2D::PopulaterConv2DSingleGroup(const Primitive &prim, schema::Primitive
   if (pad_mode == "valid") {
     attr->padMode = schema::PadMode_VALID;
   } else if (pad_mode == "same") {
-    attr->padMode = schema::PadMode_SAME;
+    attr->padMode = schema::PadMode_SAME_UPPER;
   } else {
     attr->padMode = schema::PadMode_NOTSET;
   }
@@ -233,8 +233,6 @@ void Conv2D::PopulaterConv2DSingleGroup(const Primitive &prim, schema::Primitive
     attr->activationType = schema::ActivationType_NO_ACTIVATION;
   }
 
-  // attr->padMode = schema::PadMode_SAME;
-  // attr->activationType = schema::ActivationType_RELU;
   primitive->value.type = schema::PrimitiveType_Conv2D;
   primitive->value.value = attr.release();
 }
@@ -319,7 +317,7 @@ void Conv2D::ConvInferShape(int input_h, int input_w, int *output_h, int *output
   pad_d_ = GetPadDown();
   pad_r_ = GetPadRight();
 
-  if (GetPadMode() == schema::PadMode_SAME) {
+  if (GetPadMode() == schema::PadMode_SAME_UPPER) {
     *output_w = std::ceil(static_cast<float>(input_w) / static_cast<float>(stride_w));
     *output_h = std::ceil(static_cast<float>(input_h) / static_cast<float>(stride_h));
     auto pad_h_all = ((*output_h - 1) * stride_h + (kernel_h - 1) * dilate_h + 1 - input_h);
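
Note: the renamed branch keeps the standard SAME arithmetic: output = ceil(input / stride), and the total padding needed is (output - 1) * stride + (kernel - 1) * dilation + 1 - input. Worked example with input_h = 5, stride_h = 2, kernel_h = 3, dilate_h = 1: output_h = ceil(5 / 2) = 3 and pad_h_all = (3 - 1) * 2 + (3 - 1) * 1 + 1 - 5 = 2, which the code below the visible context presumably splits between pad_u_ and pad_d_.
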
@@ -119,7 +119,7 @@ int Conv2DGradFilter::UnPackAttr(const Primitive &prim, const std::vector<AnfNod
   if (pad_mode == "valid") {
     attr->padMode = schema::PadMode_VALID;
   } else if (pad_mode == "same") {
-    attr->padMode = schema::PadMode_SAME;
+    attr->padMode = schema::PadMode_SAME_UPPER;
   } else {
     attr->padMode = schema::PadMode_NOTSET;
   }
@@ -118,7 +118,7 @@ int Conv2DGradInput::UnPackAttr(const Primitive &prim, const std::vector<AnfNode
   if (pad_mode == "valid") {
     attr->padMode = schema::PadMode_VALID;
   } else if (pad_mode == "same") {
-    attr->padMode = schema::PadMode_SAME;
+    attr->padMode = schema::PadMode_SAME_UPPER;
   } else {
     attr->padMode = schema::PadMode_NOTSET;
   }
@@ -93,7 +93,7 @@ void DeConv2D::PopulaterDeConv2DSingleGroup(const Primitive &prim, schema::Primi
   if (pad_mode == "valid" || pad_mode == "VALID") {
     attr->padMode = schema::PadMode_VALID;
   } else if (pad_mode == "same" || pad_mode == "SAME") {
-    attr->padMode = schema::PadMode_SAME;
+    attr->padMode = schema::PadMode_SAME_UPPER;
   } else {
     attr->padMode = schema::PadMode_NOTSET;
   }
@@ -105,8 +105,6 @@ void DeConv2D::PopulaterDeConv2DSingleGroup(const Primitive &prim, schema::Primi
     attr->activationType = schema::ActivationType_NO_ACTIVATION;
   }
 
-  // attr->padMode = schema::PadMode_SAME;
-  // attr->activationType = schema::ActivationType_RELU;
   primitive->value.type = schema::PrimitiveType_DeConv2D;
   primitive->value.value = attr.release();
 }
@@ -206,10 +204,10 @@ int DeConv2D::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::
   pad_d_ = GetPadDown();
   pad_r_ = GetPadRight();
   auto pad_mode = (schema::PadMode)GetPadMode();
-  if (pad_mode == schema::PadMode_CAFFE) {
+  if (pad_mode == schema::PadMode_CAFFE || pad_mode == schema::PadMode_NOTSET) {
     output_h = (input_h - 1) * stride_h + ((kernel_h - 1) * dilate_h + 1) - pad_u_ - pad_d_;
     output_w = (input_w - 1) * stride_w + ((kernel_w - 1) * dilate_w + 1) - pad_l_ - pad_r_;
-  } else if (pad_mode == schema::PadMode_SAME) {
+  } else if (pad_mode == schema::PadMode_SAME_UPPER) {
     output_h = input_h * stride_h;
     output_w = input_w * stride_w;
   } else if (pad_mode == schema::PadMode_VALID) {
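
Note: folding NOTSET into the CAFFE branch routes ONNX models with explicit pads (auto_pad NOTSET is ONNX's default) through the transposed-convolution formula output = (input - 1) * stride + dilated_kernel - pad_total. Worked example with input_h = 4, stride_h = 2, kernel_h = 3, dilate_h = 1, pad_u_ = pad_d_ = 1: output_h = (4 - 1) * 2 + ((3 - 1) * 1 + 1) - 1 - 1 = 7.
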
@@ -221,13 +219,13 @@ int DeConv2D::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::
   std::vector<int> out_shape = {output_n, output_h, output_w, output_c};
   output->set_shape(out_shape);
 
-  if (pad_mode == schema::PadMode_SAME) {
+  if (pad_mode == schema::PadMode_SAME_UPPER) {
     pad_u_ = ((input_h - 1) * stride_h + (kernel_h - 1) * dilate_h + 1 - output_h) / 2;
     pad_l_ = ((input_w - 1) * stride_w + (kernel_w - 1) * dilate_w + 1 - output_w) / 2;
   } else if (pad_mode == schema::PadMode_VALID) {
     pad_u_ = 0;
     pad_l_ = 0;
-  } else if (pad_mode == schema::PadMode_CAFFE) {
+  } else if (pad_mode == schema::PadMode_CAFFE || pad_mode == schema::PadMode_NOTSET) {
   } else {
     MS_LOG(ERROR) << "unsupported pad mode for deconv";
   }
@@ -104,7 +104,7 @@ int DepthwiseConv2D::UnPackAttr(const Primitive &prim, const std::vector<AnfNode
   if (pad_mode == "valid") {
     attr->padMode = schema::PadMode_VALID;
   } else if (pad_mode == "same") {
-    attr->padMode = schema::PadMode_SAME;
+    attr->padMode = schema::PadMode_SAME_UPPER;
   } else {
     attr->padMode = schema::PadMode_NOTSET;
   }
@@ -114,8 +114,6 @@ int DepthwiseConv2D::UnPackAttr(const Primitive &prim, const std::vector<AnfNode
   } else {
     attr->activationType = schema::ActivationType_NO_ACTIVATION;
   }
-  // attr->padMode = schema::PadMode_SAME;
-  // attr->activationType = schema::ActivationType_RELU;
   auto channel_multiplier = GetValue<int>(prim.GetAttr("channel_multiplier"));
   attr->channelMultiplier = channel_multiplier;
 
@@ -220,7 +218,7 @@ int DepthwiseConv2D::InferShape(std::vector<lite::Tensor *> inputs_, std::vector
   pad_u_ = GetPadUp();
   pad_d_ = GetPadDown();
   pad_r_ = GetPadRight();
-  if (GetPadMode() == schema::PadMode_SAME) {
+  if (GetPadMode() == schema::PadMode_SAME_UPPER) {
     output_h = std::ceil(static_cast<float>(input_h) / static_cast<float>(GetStrideH()));
     output_w = std::ceil(static_cast<float>(input_w) / static_cast<float>(GetStrideW()));
     auto pad_h_all = ((output_h - 1) * GetStrideH() + (GetKernelH() - 1) * GetDilateH() + 1 - input_h);
@@ -29,5 +29,15 @@ int Equal::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::
 }
 
 #endif
+int Equal::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
+  auto input = inputs_.front();
+  MS_ASSERT(input != nullptr);
+  auto output = outputs_.front();
+  MS_ASSERT(output != nullptr);
+  output->set_shape(input->shape());
+  output->set_data_type(TypeId::kNumberTypeUInt8);
+  output->SetFormat(input->GetFormat());
+  return RET_OK;
+}
 }  // namespace lite
 }  // namespace mindspore
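
Note: the new Equal::InferShape copies the input shape and pins the output to kNumberTypeUInt8 rather than a boolean type, presumably because the runtime stores comparison results as uint8; the TfliteCastParser change later in this commit makes the matching bool-to-uint8 substitution on the cast side.
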
@@ -36,6 +36,7 @@ class Equal : public Arithmetic {
 
   int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
 #endif
+  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
 };
 }  // namespace lite
 }  // namespace mindspore
@@ -91,7 +91,7 @@ int Gather::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outp
   std::vector<int> out_shape{in_shape};
   out_shape.erase(out_shape.begin() + axis);
   for (int i = indices_rank - 1; i >= 0; --i) {
-    out_shape.insert(out_shape.begin() + axis + i, indices_shape[i]);
+    out_shape.insert(out_shape.begin() + axis, indices_shape[i]);
   }
   output->set_shape(out_shape);
   return RET_OK;
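
Note: the fix moves the insertion point from begin() + axis + i to begin() + axis; since i runs downward, the indices dimensions land at position axis in their original order, giving the expected gather shape in[:axis] + indices_shape + in[axis+1:]. A standalone sketch of the corrected loop with toy shapes (not taken from the patch):

  #include <cassert>
  #include <vector>

  int main() {
    std::vector<int> in_shape = {6, 7, 8};    // input shape
    std::vector<int> indices_shape = {2, 3};  // indices shape
    int axis = 1;
    std::vector<int> out_shape{in_shape};
    out_shape.erase(out_shape.begin() + axis);  // drop the gathered axis: {6, 8}
    for (int i = static_cast<int>(indices_shape.size()) - 1; i >= 0; --i) {
      out_shape.insert(out_shape.begin() + axis, indices_shape[i]);
    }
    assert((out_shape == std::vector<int>{6, 2, 3, 8}));
    return 0;
  }

With the old begin() + axis + i offsets this example would have produced {6, 2, 8, 3}.
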
@@ -97,7 +97,7 @@ int Pooling::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &in
   if (pad_mode == "VALID") {
     attr->padMode = schema::PadMode_VALID;
   } else if (pad_mode == "SAME") {
-    attr->padMode = schema::PadMode_SAME;
+    attr->padMode = schema::PadMode_SAME_UPPER;
   } else {
     attr->padMode = schema::PadMode_NOTSET;
   }
@@ -187,7 +187,7 @@ int Pooling::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> out
   pad_u_ = GetPadUp();
   pad_d_ = GetPadDown();
   pad_r_ = GetPadRight();
-  if (GetPadMode() == schema::PadMode_SAME) {
+  if (GetPadMode() == schema::PadMode_SAME_UPPER) {
     output_w = std::ceil(static_cast<float>(input_w) / static_cast<float>(GetStrideW()));
     output_h = std::ceil(static_cast<float>(input_h) / static_cast<float>(GetStrideH()));
     auto pad_h_all = ((output_h - 1) * GetStrideH() + (window_h - 1) + 1 - input_h);
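
Note: pooling applies the same SAME_UPPER output arithmetic as convolution, with the pooling window standing in for the kernel and an implicit dilation of 1, hence the (window_h - 1) + 1 term.
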
@@ -90,7 +90,7 @@ int PoolingGrad::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr>
   if (pad_mode == "VALID") {
     attr->padMode = schema::PadMode_VALID;
   } else if (pad_mode == "SAME") {
-    attr->padMode = schema::PadMode_SAME;
+    attr->padMode = schema::PadMode_SAME_UPPER;
   } else {
     attr->padMode = schema::PadMode_NOTSET;
   }
@@ -162,7 +162,7 @@ int PoolingGrad::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *>
   pad_u_ = GetPadUp();
   pad_d_ = GetPadDown();
   pad_r_ = GetPadRight();
-  if (GetPadMode() == schema::PadMode_SAME) {
+  if (GetPadMode() == schema::PadMode_SAME_UPPER) {
     int output_w = std::ceil(static_cast<float>(input_w) / static_cast<float>(GetStrideW()));
     int output_h = std::ceil(static_cast<float>(input_h) / static_cast<float>(GetStrideH()));
     auto pad_h_all = ((output_h - 1) * GetStrideH() + (window_h - 1) + 1 - input_h);
@@ -42,7 +42,7 @@ TEST_F(InferTest, TestConvNode) {
   node->primitive = std::make_unique<schema::PrimitiveT>();
   node->primitive->value.type = schema::PrimitiveType_Conv2D;
   auto primitive = new schema::Conv2DT;
-  primitive->padMode = schema::PadMode_SAME;
+  primitive->padMode = schema::PadMode_SAME_UPPER;
   primitive->channelIn = 3;
   primitive->channelOut = 32;
   primitive->format = schema::Format_NHWC;
@@ -231,18 +231,6 @@ TEST_F(InferTest, TestAddNode) {
   ASSERT_EQ(TypeId::kNumberTypeFloat32, outTensor->data_type());
   auto *outData = reinterpret_cast<float *>(outTensor->MutableData());
   ASSERT_NE(nullptr, outData);
-  // //===================================================
-  // size_t output_size;
-  // std::string output_path = "./convfp32_out_1_28_28_32.bin";
-  // ReadFile(output_path.c_str(), &output_size, buf);
-  // ASSERT_NE(nullptr, buf[0]);
-  // auto output_data = reinterpret_cast<float *>(buf[0]);
-  // ASSERT_NE(nullptr, output_data);
-  // //===================================================
-  // ASSERT_EQ(output_size, outTensor->Size());
-  // for (size_t i = 0; i < outTensor->ElementsNum(); i++) {
-  //   ASSERT_EQ(output_data[i], outData[i]);
-  // }
   MS_LOG(INFO) << "Passed";
 }
 
@@ -366,141 +354,4 @@ TEST_F(InferTest, TestModel) {
   auto outputs = session->GetOutputs();
   MS_LOG(INFO) << "Passed";
 }
-
-// TEST_F(TrainTest, TestMultiNode) {
-//   auto msGraph = std::make_shared<schema::GraphDefT>();
-//   msGraph->name = "graph";
-//   auto msSubgraph = std::make_unique<schema::SubGraphDefT>();
-//   msSubgraph->name = "subGraph";
-//
-//   auto conv = std::make_unique<schema::OpDefT>();
-//   conv->inputIndex = {0, 1};
-//   conv->outputIndex = {2};
-//   conv->attr.type = schema::OpT_Conv2D;
-//   auto conv_attr = new schema::Conv2DT;
-//   conv_attr->padMode = schema::PadMode_SAME;
-//   conv_attr->format = schema::Format_NHWC;
-//   conv_attr->strideH = 1;
-//   conv_attr->strideW = 1;
-//   conv_attr->kernelH = 3;
-//   conv_attr->kernelW = 3;
-//   conv_attr->dilateH = 1;
-//   conv_attr->dilateW = 1;
-//
-//   conv->attr.value = conv_attr;
-//   conv->name = "Conv2D";
-//   conv->fmkType = schema::FmkType_CAFFE;
-//   msSubgraph->nodes.emplace_back(std::move(conv));
-//
-//   auto matMul1 = std::make_unique<schema::OpDefT>();
-//   matMul1->inputIndex = {2, 3};
-//   matMul1->outputIndex = {4};
-//   matMul1->attr.type = schema::OpT_MatMul;
-//   auto matMul_attr1 = new schema::MatMulT;
-//   matMul_attr1->transposeA = false;
-//   matMul_attr1->transposeB = true;
-//   matMul1->attr.value = matMul_attr1;
-//   matMul1->name = "matmul1";
-//   matMul1->fmkType = schema::FmkType_CAFFE;
-//   msSubgraph->nodes.emplace_back(std::move(matMul1));
-//
-//   auto matMul2 = std::make_unique<schema::OpDefT>();
-//   matMul2->inputIndex = {4, 5};
-//   matMul2->outputIndex = {6};
-//   matMul2->attr.type = schema::OpT_MatMul;
-//   auto matMul_attr2 = new schema::MatMulT;
-//   matMul_attr2->transposeA = false;
-//   matMul_attr2->transposeB = true;
-//   matMul2->attr.value = matMul_attr2;
-//   matMul2->name = "matmul2";
-//   matMul2->fmkType = schema::FmkType_CAFFE;
-//   msSubgraph->nodes.emplace_back(std::move(matMul2));
-//
-//   msSubgraph->inputIndex = {0};
-//   msSubgraph->outputIndex = {6};
-//
-//   auto input0 = std::make_unique<schema::TensorDefT>();
-//   input0->refCount = schema::MSCONST_WEIGHT_REFCOUNT;
-//   input0->format = schema::Format_NHWC;
-//   input0->dataType = TypeId::kNumberTypeFloat32;
-//   input0->dims = {1, 5, 5, 3};
-//   input0->offset = -1;
-//   msSubgraph->allTensors.emplace_back(std::move(input0));
-//
-//   auto conv_weight = std::make_unique<schema::TensorDefT>();
-//   conv_weight->refCount = schema::MSCONST_WEIGHT_REFCOUNT;
-//   conv_weight->format = schema::Format_KHWC;
-//   conv_weight->dataType = TypeId::kNumberTypeFloat32;
-//   conv_weight->dims = {8, 3, 3, 3};
-//   conv_weight->data.resize(8*3*3*3*sizeof(float));
-//   msSubgraph->allTensors.emplace_back(std::move(conv_weight));
-//
-//   auto conv_output = std::make_unique<schema::TensorDefT>();
-//   conv_output->refCount = 0;
-//   conv_output->format = schema::Format_NHWC;
-//   conv_output->dataType = TypeId::kNumberTypeFloat32;
-//   conv_output->dims = {1, 5, 5, 8};
-//   msSubgraph->allTensors.emplace_back(std::move(conv_output));
-//
-//   auto add_weight = std::make_unique<schema::TensorDefT>();
-//   add_weight->refCount = schema::MSCONST_WEIGHT_REFCOUNT;
-//   add_weight->format = schema::Format_NHWC;
-//   add_weight->dataType = TypeId::kNumberTypeFloat32;
-//   add_weight->dims = {1, 5, 5, 8};
-//   add_weight->data.resize(5*5*8*sizeof(float));
-//   msSubgraph->allTensors.emplace_back(std::move(add_weight));
-//
-//   auto add_output = std::make_unique<schema::TensorDefT>();
-//   add_output->refCount = 0;
-//   add_output->format = schema::Format_NHWC;
-//   add_output->dataType = TypeId::kNumberTypeFloat32;
-//   add_output->dims = {1, 5, 5, 8};
-//   msSubgraph->allTensors.emplace_back(std::move(add_output));
-//
-//   auto mul_weight = std::make_unique<schema::TensorDefT>();
-//   mul_weight->refCount = schema::MSCONST_WEIGHT_REFCOUNT;
-//   mul_weight->format = schema::Format_NHWC;
-//   mul_weight->dataType = TypeId::kNumberTypeFloat32;
-//   mul_weight->dims = {1, 5, 5, 8};
-//   mul_weight->data.resize(5*5*8*sizeof(float));
-//   msSubgraph->allTensors.emplace_back(std::move(mul_weight));
-//
-//   auto mul_output = std::make_unique<schema::TensorDefT>();
-//   mul_output->refCount = 0;
-//   mul_output->format = schema::Format_NHWC;
-//   mul_output->dataType = TypeId::kNumberTypeFloat32;
-//   mul_output->dims = {1, 5, 5, 8};
-//   msSubgraph->allTensors.emplace_back(std::move(mul_output));
-//   msGraph->subgraphs.emplace_back(std::move(msSubgraph));
-//
-//   flatbuffers::FlatBufferBuilder builder(1024);
-//   auto offset = schema::GraphDef::Pack(builder, msGraph.get());
-//   builder.Finish(offset);
-//   size_t size = builder.GetSize();
-//   const char *content = (char *)builder.GetBufferPointer();
-//   const std::string strstub = "";
-//
-//   auto func_graph = inference::LoadModel(content, size, strstub);
-//   ASSERT_NE(nullptr, func_graph);
-//   auto session = inference::MSSession::CreateSession(kCPUDevice, 0);
-//   ASSERT_NE(nullptr, session);
-//   auto graphId = session->CompileGraph(func_graph);
-//
-//   auto inTensor =
-//     std::shared_ptr<inference::MSTensor>(inference::MSTensor::CreateTensor(TypeId::kNumberTypeFloat32, {1, 5, 5, 3}));
-//   ASSERT_NE(nullptr, inTensor);
-//   ASSERT_EQ(sizeof(float) * (5 * 5 * 3), inTensor->Size());
-//   (void)inTensor->MutableData();
-//
-//   std::vector<std::shared_ptr<inference::MSTensor>> inputs;
-//   inputs.emplace_back(inTensor);
-//   auto outputs = session->RunGraph(graphId, inputs);
-//   ASSERT_EQ(1, outputs.size());
-//   ASSERT_EQ(1, outputs.front().size());
-//   auto runOutput = outputs.front().front();
-//   ASSERT_NE(nullptr, runOutput);
-//   ASSERT_EQ(5 * 5 * 8, runOutput->ElementsNum());
-//   ASSERT_EQ(TypeId::kNumberTypeFloat32, runOutput->data_type());
-//   MS_LOG(INFO) << "Passed";
-//}
 }  // namespace mindspore
@@ -46,7 +46,7 @@ TEST_F(TestTfliteParserConv, AttrValue) {
   ASSERT_EQ(val->strideW, 1);
   ASSERT_EQ(val->dilateH, 1);
   ASSERT_EQ(val->dilateW, 1);
-  ASSERT_EQ(val->padMode, schema::PadMode_SAME);
+  ASSERT_EQ(val->padMode, schema::PadMode_SAME_UPPER);
   ASSERT_EQ(val->padUp, 1);
   ASSERT_EQ(val->padDown, 1);
   ASSERT_EQ(val->padLeft, 1);
@@ -47,7 +47,7 @@ TEST_F(TestTfliteParserDeConv, AttrValue) {
   ASSERT_EQ(val->strideW, 1);
   ASSERT_EQ(val->dilateH, 1);
   ASSERT_EQ(val->dilateW, 1);
-  ASSERT_EQ(val->padMode, schema::PadMode_SAME);
+  ASSERT_EQ(val->padMode, schema::PadMode_SAME_UPPER);
   ASSERT_EQ(val->padUp, 1);
   ASSERT_EQ(val->padDown, 1);
   ASSERT_EQ(val->padLeft, 1);
@@ -46,7 +46,7 @@ TEST_F(TestTfliteParserDepthwiseConv1, AttrValue) {
   ASSERT_EQ(val->strideW, 1);
   ASSERT_EQ(val->dilateH, 1);
   ASSERT_EQ(val->dilateW, 1);
-  ASSERT_EQ(val->padMode, schema::PadMode_SAME);
+  ASSERT_EQ(val->padMode, schema::PadMode_SAME_UPPER);
   ASSERT_EQ(val->padUp, 1);
   ASSERT_EQ(val->padDown, 1);
   ASSERT_EQ(val->padLeft, 1);
@@ -80,7 +80,7 @@ TEST_F(TestTfliteParserDepthwiseConv2, AttrValue) {
   ASSERT_EQ(val->strideW, 1);
   ASSERT_EQ(val->dilateH, 1);
   ASSERT_EQ(val->dilateW, 1);
-  ASSERT_EQ(val->padMode, schema::PadMode_SAME);
+  ASSERT_EQ(val->padMode, schema::PadMode_SAME_UPPER);
   ASSERT_EQ(val->padUp, 1);
   ASSERT_EQ(val->padDown, 1);
   ASSERT_EQ(val->padLeft, 1);
@@ -73,7 +73,7 @@ TEST_F(TestTfliteParserAvgPooling, AttrValue) {
   ASSERT_EQ(val->windowH, 2);
   ASSERT_EQ(val->strideW, 1);
   ASSERT_EQ(val->strideH, 1);
-  ASSERT_EQ(val->padMode, schema::PadMode_SAME);
+  ASSERT_EQ(val->padMode, schema::PadMode_SAME_UPPER);
   ASSERT_EQ(val->padUp, 0);
   ASSERT_EQ(val->padDown, 1);
   ASSERT_EQ(val->padLeft, 0);
@@ -42,7 +42,7 @@ CNodeTptr BuildConv2D() {
   convNode->primitive = std::make_unique<schema::PrimitiveT>();
   convNode->primitive->value.type = schema::PrimitiveType_Conv2D;
   auto prim1 = new schema::Conv2DT;
-  prim1->padMode = schema::PadMode_SAME;
+  prim1->padMode = schema::PadMode_SAME_UPPER;
   prim1->format = schema::Format_NHWC;
   prim1->strideH = 1;
   prim1->strideW = 1;
@@ -62,7 +62,7 @@ CNodeTptr BuildDepthwiseConv2D() {
   convNode->primitive = std::make_unique<schema::PrimitiveT>();
   convNode->primitive->value.type = schema::PrimitiveType_DepthwiseConv2D;
   auto prim1 = new schema::DepthwiseConv2DT;
-  prim1->padMode = schema::PadMode_SAME;
+  prim1->padMode = schema::PadMode_SAME_UPPER;
   prim1->format = schema::Format_NHWC;
   prim1->strideH = 1;
   prim1->strideW = 1;
@@ -42,7 +42,7 @@ CNodeTptr BuildConv2D() {
   convNode->primitive = std::make_unique<schema::PrimitiveT>();
   convNode->primitive->value.type = schema::PrimitiveType_Conv2D;
   auto prim1 = new schema::Conv2DT;
-  prim1->padMode = schema::PadMode_SAME;
+  prim1->padMode = schema::PadMode_SAME_UPPER;
   prim1->format = schema::Format_NHWC;
   prim1->strideH = 1;
   prim1->strideW = 1;
@@ -62,7 +62,7 @@ CNodeTptr BuildDepthwiseConv2D() {
   convNode->primitive = std::make_unique<schema::PrimitiveT>();
   convNode->primitive->value.type = schema::PrimitiveType_DepthwiseConv2D;
   auto prim1 = new schema::DepthwiseConv2DT;
-  prim1->padMode = schema::PadMode_SAME;
+  prim1->padMode = schema::PadMode_SAME_UPPER;
   prim1->format = schema::Format_NHWC;
   prim1->strideH = 1;
   prim1->strideW = 1;
@@ -42,7 +42,7 @@ CNodeTptr BuildConv2D() {
   convNode->primitive = std::make_unique<schema::PrimitiveT>();
   convNode->primitive->value.type = schema::PrimitiveType_Conv2D;
   auto prim1 = new schema::Conv2DT;
-  prim1->padMode = schema::PadMode_SAME;
+  prim1->padMode = schema::PadMode_SAME_UPPER;
   prim1->format = schema::Format_NHWC;
   prim1->strideH = 1;
   prim1->strideW = 1;
@@ -62,7 +62,7 @@ CNodeTptr BuildDepthwiseConv2D() {
   convNode->primitive = std::make_unique<schema::PrimitiveT>();
   convNode->primitive->value.type = schema::PrimitiveType_DepthwiseConv2D;
   auto prim1 = new schema::DepthwiseConv2DT;
-  prim1->padMode = schema::PadMode_SAME;
+  prim1->padMode = schema::PadMode_SAME_UPPER;
   prim1->format = schema::Format_NHWC;
   prim1->strideH = 1;
   prim1->strideW = 1;
@@ -48,7 +48,7 @@ CNodeTptr BuildConv2D(int with_bias_flag) {
   convNode->primitive = std::make_unique<schema::PrimitiveT>();
   convNode->primitive->value.type = schema::PrimitiveType_Conv2D;
   auto prim1 = new schema::Conv2DT;
-  prim1->padMode = schema::PadMode_SAME;
+  prim1->padMode = schema::PadMode_SAME_UPPER;
   prim1->format = schema::Format_NHWC;
   prim1->strideH = 1;
   prim1->strideW = 1;
@@ -74,7 +74,7 @@ CNodeTptr BuildDepthwiseConv2D(int with_bias_flag) {
   convNode->primitive = std::make_unique<schema::PrimitiveT>();
   convNode->primitive->value.type = schema::PrimitiveType_DepthwiseConv2D;
   auto prim1 = new schema::DepthwiseConv2DT;
-  prim1->padMode = schema::PadMode_SAME;
+  prim1->padMode = schema::PadMode_SAME_UPPER;
   prim1->format = schema::Format_NHWC;
   prim1->strideH = 1;
   prim1->strideW = 1;
@@ -71,6 +71,13 @@ STATUS OnnxDeConvParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::N
     return RET_NULL_PTR;
   }
 
+  // set default params
+  attr->padMode = schema::PadMode_NOTSET;
+  attr->group = 1;
+  attr->strideW = 1;
+  attr->strideH = 1;
+  attr->dilateW = 1;
+  attr->dilateH = 1;
   // set opdef each attr params
   for (const auto &onnx_node_attr : onnx_node.attribute()) {
     if (onnx_node_attr.name() == "group") {
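
Note: these ONNX attributes (group, strides, dilations) are optional in the ONNX spec, so seeding the defaults before the attribute loop matters: a node that omits them would otherwise leave the fields at their schema defaults, presumably zero for the strides, which would break the output-size math downstream.
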
@@ -121,6 +128,9 @@ STATUS OnnxDeConvParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::N
         MS_LOG(ERROR) << "Unsupported format: " << onnx_node_attr.s().c_str();
         return RET_ERROR;
       }
+    } else if (onnx_node_attr.name() == "output_padding") {
+      MS_LOG(ERROR) << "output_padding param hasn't been supported";
+      return RET_NOT_SUPPORT;
     }
   }
 
@@ -22,8 +22,10 @@ namespace lite {
 schema::PadMode OnnxNodeParser::GetOnnxPadMode(const onnx::AttributeProto &onnx_node_attr) {
   if (onnx_node_attr.s() == "NOTSET") {
     return schema::PadMode_NOTSET;
-  } else if (onnx_node_attr.s() == "SAME_UPPER" || onnx_node_attr.s() == "SAME_LOWER") {
-    return schema::PadMode_SAME;
+  } else if (onnx_node_attr.s() == "SAME_UPPER") {
+    return schema::PadMode_SAME_UPPER;
+  } else if (onnx_node_attr.s() == "SAME_LOWER") {
+    return schema::PadMode_SAME_LOWER;
   } else if (onnx_node_attr.s() == "VALID") {
     return schema::PadMode_VALID;
   } else {
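
Note: ONNX defines SAME_UPPER to put the extra padding pixel at the end (bottom/right) and SAME_LOWER at the beginning (top/left) when the total padding is odd; for a total of 3, SAME_UPPER pads 1 before and 2 after, SAME_LOWER the reverse. The old code mapped both to a single SAME mode, which misplaced the output by one pixel for SAME_LOWER models with odd padding.
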
@@ -49,6 +49,9 @@ STATUS TfliteCastParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_
     return RET_NULL_PTR;
   }
   attr->srcT = GetTfliteDataType(in_tensor->type);
+  if (attr->srcT == TypeId::kNumberTypeBool) {
+    attr->srcT = TypeId::kNumberTypeUInt8;
+  }
   const auto &out_tensor = tflite_tensors[tflite_op->outputs[0]];
   if (out_tensor == nullptr) {
     MS_LOG(ERROR) << "tensor is null";
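
Note: the cast parser now rewrites a bool source type to uint8, presumably because the runtime represents boolean tensors as uint8; this matches the new Equal::InferShape earlier in the commit, which emits kNumberTypeUInt8 for comparison results.
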
@@ -18,6 +18,7 @@
 #include <vector>
 #include <memory>
 #include <map>
+#include <string>
 
 namespace mindspore {
 namespace lite {
@@ -157,7 +157,7 @@ TypeId GetTfliteDataType(const tflite::TensorType &tflite_data_type) {
 
 schema::PadMode GetPadMode(tflite::Padding tflite_padmode) {
   if (tflite_padmode == tflite::Padding_SAME) {
-    return schema::PadMode_SAME;
+    return schema::PadMode_SAME_UPPER;
   } else if (tflite_padmode == tflite::Padding_VALID) {
     return schema::PadMode_VALID;
   } else {
@@ -198,7 +198,7 @@ STATUS getPaddingParam(const std::unique_ptr<tflite::TensorT> &tensor, schema::P
   int padDown = 0;
   int padLeft = 0;
   int padRight = 0;
-  if (pad_mode == schema::PadMode_SAME) {
+  if (pad_mode == schema::PadMode_SAME_UPPER) {
     auto shape = tensor->shape;
     int H_input = shape.at(1);
     int W_input = shape.at(2);