Fix a parser bug for MindIR models.

wang_shaocong 2020-12-08 11:38:00 +08:00
parent 7bc084487e
commit 9d586123ea
9 changed files with 60 additions and 2 deletions

View File

@@ -509,6 +509,23 @@ int ElementOptDivRelu6(const float *input0, const float *input1, float *output,
  return NNACL_OK;
}
int ElementOptDivInt(const int *input0, const int *input1, int *output, const int element_size,
                     const ArithmeticParameter *param) {
  if (param->in_elements_num0_ == 1) {
    // input0 is a broadcast scalar; every element of input1 is a divisor.
    for (int index = 0; index < element_size; index++) {
      if (input1[index] == 0) {
        return NNACL_ERRCODE_DIVISOR_ZERO;
      }
      output[index] = input0[0] / input1[index];
    }
  } else {
    // input1 is a broadcast scalar divisor; reject zero once up front.
    if (input1[0] == 0) {
      return NNACL_ERRCODE_DIVISOR_ZERO;
    }
    for (int index = 0; index < element_size; index++) {
      output[index] = input0[index] / input1[0];
    }
  }
  return NNACL_OK;
}
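// Minimal usage sketch of the new integer path (illustration only; assumes
// ArithmeticParameter is zero-initializable and that ElementOptDivInt only
// consults in_elements_num0_; the function name below is hypothetical):
static int ExampleOptDivIntUsage(void) {
  ArithmeticParameter param = {0};
  param.in_elements_num0_ = 1;  // input0 is the broadcast scalar
  const int scalar[1] = {100};
  const int divisors[4] = {2, 5, 10, 25};
  int out[4] = {0};
  // Returns NNACL_OK with out == {50, 20, 10, 4}; a zero divisor
  // yields NNACL_ERRCODE_DIVISOR_ZERO instead.
  return ElementOptDivInt(scalar, divisors, out, 4, &param);
}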
int ElementMul(const float *input0, const float *input1, float *output, const int element_size) {
  int index = 0;
#ifdef ENABLE_NEON

View File

@@ -58,6 +58,8 @@ int ElementOptDivRelu(const float *input0, const float *input1, float *output, c
                      const ArithmeticParameter *param);
int ElementOptDivRelu6(const float *input0, const float *input1, float *output, const int element_size,
                       const ArithmeticParameter *param);
int ElementOptDivInt(const int *input0, const int *input1, int *output, const int element_size,
                     const ArithmeticParameter *param);
int ElementMul(const float *input0, const float *input1, float *output, const int element_size);
int ElementMulRelu(const float *input0, const float *input1, float *output, const int element_size);
int ElementMulRelu6(const float *input0, const float *input1, float *output, const int element_size);

View File

@@ -59,6 +59,8 @@ int Activation::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr>
    attr->type = schema::ActivationType_HSWISH;
  } else if (prim.name() == "HSigmoid") {
    attr->type = schema::ActivationType_HSIGMOID;
  } else if (prim.name() == "Tanh") {
    attr->type = schema::ActivationType_TANH;
  }
  this->primitive_->value.value = attr.release();
  if (this->primitive_->value.value == nullptr) {
View File

@@ -29,6 +29,35 @@ void Div::SetActivationType(int activation_type) {
  this->primitive_->value.AsDiv()->activationType = (schema::ActivationType)activation_type;
}
int Div::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) {
  // Lazily allocate the schema primitive the first time this op is unpacked.
  if (this->primitive_ == nullptr) {
    this->primitive_ = new (std::nothrow) schema::PrimitiveT;
    if (this->primitive_ == nullptr) {
      MS_LOG(ERROR) << "new primitiveT failed";
      return RET_ERROR;
    }
    this->primitive_->value.type = schema::PrimitiveType_Div;
  }
  if (this->primitive_->value.type != schema::PrimitiveType_Div) {
    MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type;
    return RET_ERROR;
  }
  // Attach a default DivT attribute if none has been set yet.
  if (this->primitive_->value.value == nullptr) {
    auto attr = new (std::nothrow) schema::DivT();
    if (attr == nullptr) {
      MS_LOG(ERROR) << "new primitiveT value failed";
      return RET_ERROR;
    }
    this->primitive_->value.value = attr;
    if (this->primitive_->value.value == nullptr) {
      MS_LOG(ERROR) << "primitive value is nullptr";
      return RET_ERROR;
    }
  }
  return RET_OK;
}
#else
int Div::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
MS_ASSERT(nullptr != primitive);

View File

@@ -32,6 +32,7 @@ class Div : public Arithmetic {
  MS_DECLARE_PARENT(Div, Arithmetic);
  explicit Div(schema::PrimitiveT *primitive) : Arithmetic(primitive) {}
  void SetActivationType(int activation_type);
  int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override;
#else
  int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;

View File

@@ -581,6 +581,10 @@ std::shared_ptr<PrimitiveC> PrimitiveC::Create(const Primitive &prim, const std:
    return NewPrimitiveC<Floor>(prim, inputs, quantType);
  } else if (op_type == "Minimum") {
    return NewPrimitiveC<Minimum>(prim, inputs, quantType);
  } else if (op_type == "Div") {
    return NewPrimitiveC<Div>(prim, inputs, quantType);
  } else if (op_type == "Tanh") {
    return NewPrimitiveC<Activation>(prim, inputs, quantType);
#ifdef SUPPORT_TRAIN
  } else if (op_type == "SoftmaxCrossEntropyWithLogits") {
    return NewPrimitiveC<SoftmaxCrossEntropy>(prim, inputs, quantType);
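Note that "Tanh" is routed through the generic Activation primitive rather than a dedicated Tanh op; this matches the Activation::UnPackAttr change earlier in this commit, which maps prim.name() == "Tanh" to schema::ActivationType_TANH.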

View File

@@ -297,6 +297,7 @@ void ArithmeticCPUKernel::InitOptRunFunction() {
    default:
      arithmeticParameter_->broadcasting_ = false;
      arithmetic_opt_run_ = ElementOptDiv;
      arithmetic_opt_run_int_ = ElementOptDivInt;
      break;
  }
  break;
@@ -554,4 +555,5 @@ REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_FloorDiv, CpuArithmeticFp32Kern
REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_FloorMod, CpuArithmeticFp32KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_SquaredDifference, CpuArithmeticFp32KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Eltwise, CpuArithmeticFp32KernelCreator)
REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_Div, CpuArithmeticFp32KernelCreator)
} // namespace mindspore::kernel
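For orientation, a hedged sketch of what the new wiring amounts to; the actual member declaration lives in ArithmeticCPUKernel's header, and the typedef below is an assumption for illustration:

// Assumed function-pointer type, matching ElementOptDivInt as declared in arithmetic.h.
typedef int (*ArithmeticOptIntRun)(const int *input0, const int *input1, int *output,
                                   const int element_size, const ArithmeticParameter *param);
ArithmeticOptIntRun arithmetic_opt_run_int_ = ElementOptDivInt;
// At run time the kernel invokes arithmetic_opt_run_int_ for int32 inputs when one
// operand is a broadcastable scalar; the REG_KERNEL line above is what lets an
// int32 Div reach this kernel in the first place.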

View File

@@ -445,7 +445,8 @@ int AnfExporter::ConvertInputValueNode(const std::shared_ptr<AnfNode> &input_ano
  auto valueAbstract = valueNode->abstract();
  auto abstractScalar = utils::cast<abstract::AbstractScalarPtr>(valueAbstract);
  auto typePtr = abstractScalar->GetTypeTrack();
  paramTensor->dataType = typePtr->type_id();
  // Data of int64 is converted to int32 here.
  paramTensor->dataType = kNumberTypeInt32;
  paramTensor->dims = {1};
  paramTensor->nodeType = schema::NodeType::NodeType_ValueNode;
  int real_data = CastToInt(value).front();
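A minimal illustration of the narrowing this implies (hypothetical values; CastToInt is the converter referenced above):

#include <cstdint>
// A scalar ValueNode in a MindIR graph may carry int64, but the exported lite
// tensor is tagged kNumberTypeInt32, so the payload is narrowed on export.
// Values outside the int32 range would be truncated under this scheme.
int64_t from_value_node = 3;                             // e.g. an axis attribute
int32_t stored = static_cast<int32_t>(from_value_node);  // what the int32 path keeps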

View File

@@ -1,4 +1,4 @@
# MindSpore Lite on-device skeleton detection demo (Android)
# MindSpore Lite on-device style transfer demo (Android)
This sample demonstrates how to use the MindSpore Lite API and a MindSpore Lite style transfer model to perform on-device inference: based on the standard images built into the demo, the artistic style of a target image is replaced and the result is displayed in the app's image preview screen.