forked from mindspore-Ecosystem/mindspore
commit
11b2d4c8aa
|
@ -71,11 +71,28 @@ set(MICRO_CODER_SRC
|
|||
${CMAKE_CURRENT_SOURCE_DIR}/opcoders/file_collector.cc
|
||||
)
|
||||
|
||||
# Serializer sources: emitters that turn kernel parameters into C source text.
# RELATIVE is required here — without it ${CMAKE_CURRENT_SOURCE_DIR} is parsed
# as a glob pattern and the directory itself ends up in the source list; it
# also keeps the result consistent with the GLOB_RECURSE calls below.
file(GLOB OPCODER_SRC_SERIALIZER RELATIVE ${CMAKE_CURRENT_SOURCE_DIR}
        opcoders/serializers/nnacl_serializer/*.cc
        )

# Float32 operator coders.
file(GLOB_RECURSE OPCODER_SRC_FP32 RELATIVE ${CMAKE_CURRENT_SOURCE_DIR}
        opcoders/nnacl/fp32/*.cc
        )

# Int8 (quantized) operator coders.
file(GLOB_RECURSE OPCODER_SRC_INT8 RELATIVE ${CMAKE_CURRENT_SOURCE_DIR}
        opcoders/nnacl/int8/*.cc
        )
|
||||
|
||||
|
||||
# Aggregate every coder source group (allocator, generator, base and
# CMSIS-NN opcoders, serializers, and the per-precision nnacl opcoders)
# into MICRO_CODER_SRC, which the codegen executable target consumes.
list(APPEND MICRO_CODER_SRC
        ${MICRO_ALLOCATOR}
        ${MICRO_GENERATOR}
        ${MICRO_OPCODERS_BASE}
        ${MICRO_OPCODERS_CMSIS_NN}
        ${OPCODER_SRC_SERIALIZER}
        ${OPCODER_SRC_FP32}
        ${OPCODER_SRC_INT8}
        )
|
||||
|
||||
add_executable(codegen main.cc
|
||||
|
|
|
@ -0,0 +1,71 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
#include "micro/coder/opcoders/nnacl/fp32/activation_fp32_coder.h"
|
||||
#include <string>
|
||||
#include "nnacl/fp32/activation_fp32.h"
|
||||
#include "nnacl/op_base.h"
|
||||
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
|
||||
#include "micro/coder/opcoders/file_collector.h"
|
||||
|
||||
using mindspore::schema::PrimitiveType_Activation;

namespace mindspore::lite::micro {

// Emits C source that applies the configured activation (relu, relu6,
// leaky-relu, sigmoid, tanh, hswish) element-wise over the input tensor.
// Returns RET_ERROR for activation types that have no generated kernel.
int ActivationFP32Coder::DoCode(CoderContext *const context) {
  // attribute
  auto *activation_parameter = reinterpret_cast<ActivationParameter *>(parameter_);
  // Only the slice for task 0 is emitted — generated code is single-threaded.
  int task_id = 0;
  int length = input_tensor_->ElementsNum();
  MS_CHECK_TRUE(thread_num_ > 0, "thread_num_ <= 0");
  int stride = UP_DIV(length, thread_num_);
  // With task_id == 0 this reduces to min(stride, length).
  int count = MSMIN(stride, length - stride * task_id);

  // Sigmoid uses a dedicated slim kernel; all other types share activation.c.
  if (activation_parameter->type_ == schema::ActivationType_SIGMOID) {
    Collect(context, {"runtime/kernel/fp32/sigmoid.h"}, {"sigmoid.c"});
  } else {
    Collect(context, {"nnacl/fp32/activation.h"}, {"activation.c"});
  }
  nnacl::NNaclFp32Serializer code;
  // Emit a call to the nnacl kernel matching the activation type.
  switch (activation_parameter->type_) {
    case schema::ActivationType_RELU:
      code.CodeFunction("Fp32Relu", input_tensor_, count, output_tensor_);
      break;
    case schema::ActivationType_RELU6:
      code.CodeFunction("Fp32Relu6", input_tensor_, count, output_tensor_);
      break;
    case schema::ActivationType_LEAKY_RELU:
      code.CodeFunction("LRelu", input_tensor_, count, output_tensor_, activation_parameter->alpha_);
      break;
    case schema::ActivationType_SIGMOID:
      code.CodeFunction("Sigmoid", input_tensor_, count, output_tensor_);
      break;
    case schema::ActivationType_TANH:
      code.CodeFunction("Tanh", input_tensor_, count, output_tensor_);
      break;
    case schema::ActivationType_HSWISH:
      code.CodeFunction("HSwish", input_tensor_, count, output_tensor_);
      break;
    default:
      MS_LOG(ERROR) << "Activation type error";
      return RET_ERROR;
  }
  MS_LOG(DEBUG) << "ActivationFP32Code has been called";
  context->AppendCode(code.str());
  return lite::RET_OK;
}

REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Activation, CPUOpCoderCreator<ActivationFP32Coder>)
}  // namespace mindspore::lite::micro
|
|
@ -0,0 +1,40 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MICRO_CODER_OPCODERS_FP32_ACTIVATIONFP32_CODER_H_
#define MICRO_CODER_OPCODERS_FP32_ACTIVATIONFP32_CODER_H_

#include <vector>
#include "micro/coder/opcoders/op_coder.h"

namespace mindspore::lite::micro {

// Generates C code for float32 activation operators
// (relu / relu6 / leaky-relu / sigmoid / tanh / hswish).
class ActivationFP32Coder final : public OperatorCoder {
 public:
  ActivationFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
                      const Model::Node *node, size_t node_index, Target target)
      : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}

  ~ActivationFP32Coder() override = default;

  // No preparation needed: all work happens at code-generation time.
  int Prepare(CoderContext *const context) override { return RET_OK; }

  int DoCode(CoderContext *const context) override;
};

}  // namespace mindspore::lite::micro

// Fixed: the closing comment previously read MICRO_CODER_OPCODERS_FP32__CODER_H_,
// which did not match the include guard above.
#endif  // MICRO_CODER_OPCODERS_FP32_ACTIVATIONFP32_CODER_H_
|
|
@ -0,0 +1,48 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "micro/coder/opcoders/nnacl/fp32/addn_fp32_coder.h"
#include <string>
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
#include "micro/coder/opcoders/file_collector.h"

using mindspore::schema::PrimitiveType_AddN;
namespace mindspore::lite::micro {

// Emits C code that sums all input tensors element-wise into the output:
// out = in0 + in1, then out += in_i for every remaining input.
int AddNFP32Coder::DoCode(CoderContext *const context) {
  Tensor *input0 = input_tensors_.at(kInputIndex);
  Tensor *input1 = input_tensors_.at(1);
  int elements_num = input0->ElementsNum();

  // Get Tensor Pointer
  std::string input0_str = allocator_->GetRuntimeAddr(input0);
  std::string input1_str = allocator_->GetRuntimeAddr(input1);
  Collect(context, {"nnacl/kernel/fp32/add_fp32_slim.h"}, {"add_fp32_slim.c"});
  nnacl::NNaclFp32Serializer code;
  code.CodeFunction("ElementAdd", input0_str, input1_str, output_tensor_, elements_num);
  if (input_tensors_.size() > 2) {
    for (size_t i = 2; i < input_tensors_.size(); ++i) {
      std::string input_str = allocator_->GetRuntimeAddr(input_tensors_.at(i));
      // Accumulate in place: ElementAdd(in, acc, out, n). The original call
      // dropped one argument, emitting an ElementAdd call with only three
      // data parameters that cannot match the kernel signature.
      code.CodeFunction("ElementAdd", input_str, output_tensor_, output_tensor_, elements_num);
    }
  }
  context->AppendCode(code.str());
  return RET_OK;
}

REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_AddN, CPUOpCoderCreator<AddNFP32Coder>)

}  // namespace mindspore::lite::micro
|
|
@ -0,0 +1,36 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_ADDN_FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_ADDN_FP32_CODER_H_
#include <vector>
#include "micro/coder/opcoders/op_coder.h"

namespace mindspore::lite::micro {
// Generates C code for the float32 AddN operator: element-wise sum of two
// or more input tensors (see AddNFP32Coder::DoCode).
class AddNFP32Coder : public OperatorCoder {
 public:
  AddNFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
                const Model::Node *node, size_t node_index, Target target)
      : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}

  ~AddNFP32Coder() override = default;

  // Nothing to precompute; all work happens in DoCode.
  int Prepare(CoderContext *const context) override { return RET_OK; }

  int DoCode(CoderContext *const context) override;
};
}  // namespace mindspore::lite::micro
#endif  // MINDSPORE_LITE_MICRO_CODER_OPCODERS_ADDN_FP32_CODER_H_
|
|
@ -0,0 +1,373 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
#include "micro/coder/opcoders/nnacl/fp32/arithmetic_fp32_coder.h"
#include <string>
#include <map>
#include <functional>  // std::function — used by Prepare's type_setters map below
#include <type_traits>
#include "micro/coder/opcoders/file_collector.h"
#include "nnacl/fp32/arithmetic_fp32.h"
#include "micro/coder/log.h"

namespace mindspore::lite::micro {

// Fills arithmetic_parameter_ with the element counts and shapes of both
// inputs and the output; when one operand is a scalar, selects the optimized
// "ElementOpt*" kernel pair and disables broadcasting.
int ArithmeticFP32Coder::Init(CoderContext *const context) {
  filter_tensor_ = input_tensors_.at(kWeightIndex);
  MS_CHECK_PTR(filter_tensor_);
  // The kernel family (float vs int) follows the first input's dtype.
  if (input_tensor_->data_type() == kNumberTypeFloat32 || input_tensor_->data_type() == kNumberTypeFloat16) {
    data_type_ = kDataTypeFloat;
  } else {
    data_type_ = kDataTypeInt;
  }
  arithmetic_parameter_->in_elements_num0_ = input_tensor_->ElementsNum();
  arithmetic_parameter_->in_elements_num1_ = filter_tensor_->ElementsNum();
  arithmetic_parameter_->out_elements_num_ = output_tensor_->ElementsNum();
  // Replace any shape entry still holding the -1 placeholder by copying the
  // whole tensor shape into the fixed-size (DEFAULT_ARITHMETIC_NDIMS) array.
  for (size_t i = 0; i < input_tensor_->shape().size(); i++) {
    if (arithmetic_parameter_->in_shape0_[i] == -1) {
      MS_CHECK_RET_CODE(
        memcpy_s(arithmetic_parameter_->in_shape0_, DEFAULT_ARITHMETIC_NDIMS * sizeof(int),
                 static_cast<void *>(input_tensor_->shape().data()), input_tensor_->shape().size() * sizeof(int)),
        "memcpy_s in shape0 failed!");
    }
  }
  for (size_t i = 0; i < filter_tensor_->shape().size(); i++) {
    if (arithmetic_parameter_->in_shape1_[i] == -1) {
      MS_CHECK_RET_CODE(
        memcpy_s(arithmetic_parameter_->in_shape1_, DEFAULT_ARITHMETIC_NDIMS * sizeof(int),
                 static_cast<void *>(filter_tensor_->shape().data()), filter_tensor_->shape().size() * sizeof(int)),
        "memcpy_s in shape1 failed!");
    }
  }
  for (size_t i = 0; i < output_tensor_->shape().size(); i++) {
    if (arithmetic_parameter_->out_shape_[i] == -1) {
      MS_CHECK_RET_CODE(
        memcpy_s(arithmetic_parameter_->out_shape_, DEFAULT_ARITHMETIC_NDIMS * sizeof(int),
                 static_cast<void *>(output_tensor_->shape().data()), output_tensor_->shape().size() * sizeof(int)),
        "memcpy_s in out shape failed!");
    }
  }

  // Scalar ⊗ tensor: use the optimized scalar-broadcast kernels and turn the
  // generic broadcasting machinery off. Sub has no integer variant here.
  if (arithmetic_parameter_->in_elements_num0_ == 1 || arithmetic_parameter_->in_elements_num1_ == 1) {
    switch (arithmetic_parameter_->op_parameter_.type_) {
      case PrimitiveType_Mul:
        switch (arithmetic_parameter_->activation_type_) {
          case schema::ActivationType_RELU:
            arithmetic_parameter_->broadcasting_ = false;
            arithmetic_opt_run_ = "ElementOptMulRelu";
            arithmetic_opt_run_int_ = "ElementOptMulReluInt";
            break;
          case schema::ActivationType_RELU6:
            arithmetic_parameter_->broadcasting_ = false;
            arithmetic_opt_run_ = "ElementOptMulRelu6";
            arithmetic_opt_run_int_ = "ElementOptMulRelu6Int";
            break;
          default:
            arithmetic_parameter_->broadcasting_ = false;
            arithmetic_opt_run_ = "ElementOptMul";
            arithmetic_opt_run_int_ = "ElementOptMulInt";
            break;
        }
        break;
      case PrimitiveType_Add:
        switch (arithmetic_parameter_->activation_type_) {
          case schema::ActivationType_RELU:
            arithmetic_parameter_->broadcasting_ = false;
            arithmetic_opt_run_ = "ElementOptAddRelu";
            arithmetic_opt_run_int_ = "ElementOptAddReluInt";
            break;
          case schema::ActivationType_RELU6:
            arithmetic_parameter_->broadcasting_ = false;
            arithmetic_opt_run_ = "ElementOptAddRelu6";
            arithmetic_opt_run_int_ = "ElementOptAddRelu6Int";
            break;
          default:
            arithmetic_parameter_->broadcasting_ = false;
            arithmetic_opt_run_ = "ElementOptAdd";
            arithmetic_opt_run_int_ = "ElementOptAddInt";
            break;
        }
        break;
      case PrimitiveType_Sub:
        switch (arithmetic_parameter_->activation_type_) {
          case schema::ActivationType_RELU:
            arithmetic_parameter_->broadcasting_ = false;
            arithmetic_opt_run_ = "ElementOptSubRelu";
            break;
          case schema::ActivationType_RELU6:
            arithmetic_parameter_->broadcasting_ = false;
            arithmetic_opt_run_ = "ElementOptSubRelu6";
            break;
          default:
            arithmetic_parameter_->broadcasting_ = false;
            arithmetic_opt_run_ = "ElementOptSub";
            break;
        }
        break;
      default:
        // Other ops have no scalar-optimized kernel; keep the generic path.
        break;
    }
  }
  return RET_OK;
}
|
||||
|
||||
// Recursively emits broadcasted element-wise code. Walks the output shape one
// dimension at a time; once past break_pos_ (the last dimension where the
// input shapes differ), the innermost contiguous span is emitted as a single
// kernel call. input0/input1/output are C pointer expressions (base + offset)
// accumulated through the recursion.
int ArithmeticFP32Coder::BroadcastRun(const std::string &input0, const std::string &input1, const std::string &output,
                                      int dim, int out_count, int out_thread_stride,
                                      nnacl::NNaclFp32Serializer *const code) {
  if (dim > break_pos_) {
    // Leaf: emit one int- or float-kernel call over out_count elements,
    // offset by the per-thread stride.
    if (data_type_ == kDataTypeInt) {
      *code << "\t\t" << arithmetic_run_int_ << "(((" << input0 << ") + " << out_thread_stride << "), ((" << input1
            << ") + " << out_thread_stride << "), ((" << output << ") + " << out_thread_stride << "), " << out_count
            << ");\n";

    } else {
      *code << "\t\t" << arithmetic_run_ << "(((" << input0 << ") + " << out_thread_stride << "), ((" << input1
            << ") + " << out_thread_stride << "), ((" << output << ") + " << out_thread_stride << "), " << out_count
            << ");\n";
    }
    return RET_OK;
  }
  for (int i = 0; i < arithmetic_parameter_->out_shape_[dim]; ++i) {
    // A size-1 input dimension is broadcast: its offset stays pinned at 0.
    int pos0_ = arithmetic_parameter_->in_shape0_[dim] == 1 ? 0 : i;
    int pos1_ = arithmetic_parameter_->in_shape1_[dim] == 1 ? 0 : i;
    int error_code = BroadcastRun(input0 + "+" + std::to_string(pos0_ * arithmetic_parameter_->in_strides0_[dim]),
                                  input1 + "+" + std::to_string(pos1_ * arithmetic_parameter_->in_strides1_[dim]),
                                  output + "+" + std::to_string(i * arithmetic_parameter_->out_strides_[dim]), dim + 1,
                                  out_count, out_thread_stride, code);
    if (error_code != RET_OK) {
      return error_code;
    }
  }
  return RET_OK;
}
|
||||
|
||||
// Maps the operator's primitive type (with its fused activation) to the names
// of the nnacl kernel functions that DoCode will emit, then calls Init() to
// fill shapes/element counts and the scalar-optimized kernel names.
// Returns RET_ERROR when the operator type is unsupported.
int ArithmeticFP32Coder::Prepare(CoderContext *const context) {
  if (parameter_ == nullptr) {
    return RET_ERROR;
  }
  arithmetic_parameter_ = reinterpret_cast<ArithmeticParameter *>(parameter_);
  // Each setter assigns arithmetic_run_ (float kernel) and, where an integer
  // variant exists, arithmetic_run_int_.
  std::map<int, std::function<void()>> type_setters = {
    {PrimitiveType_Mul,
     [this]() {
       switch (arithmetic_parameter_->activation_type_) {
         case schema::ActivationType_RELU:
           arithmetic_run_ = "ElementMulRelu";
           arithmetic_run_int_ = "ElementMulReluInt";
           break;
         case schema::ActivationType_RELU6:
           arithmetic_run_ = "ElementMulRelu6";
           arithmetic_run_int_ = "ElementMulRelu6Int";
           break;
         default:
           arithmetic_run_ = "ElementMul";
           arithmetic_run_int_ = "ElementMulInt";
           break;
       }
     }},
    {PrimitiveType_Add,
     [this]() {
       switch (arithmetic_parameter_->activation_type_) {
         case schema::ActivationType_RELU:
           arithmetic_run_ = "ElementAddRelu";
           arithmetic_run_int_ = "ElementAddReluInt";
           break;
         case schema::ActivationType_RELU6:
           arithmetic_run_ = "ElementAddRelu6";
           arithmetic_run_int_ = "ElementAddRelu6Int";
           break;
         default:
           arithmetic_run_ = "ElementAdd";
           arithmetic_run_int_ = "ElementAddInt";
           break;
       }
     }},
    {PrimitiveType_Sub,
     [this]() {
       switch (arithmetic_parameter_->activation_type_) {
         case schema::ActivationType_RELU:
           arithmetic_run_ = "ElementSubRelu";
           break;
         case schema::ActivationType_RELU6:
           arithmetic_run_ = "ElementSubRelu6";
           break;
         default:
           arithmetic_run_ = "ElementSub";
           break;
       }
     }},
    {PrimitiveType_Div,
     [this]() {
       switch (arithmetic_parameter_->activation_type_) {
         case schema::ActivationType_RELU:
           arithmetic_run_ = "ElementDivRelu";
           break;
         case schema::ActivationType_RELU6:
           arithmetic_run_ = "ElementDivRelu6";
           break;
         default:
           arithmetic_run_ = "ElementDiv";
           break;
       }
     }},
    {PrimitiveType_LogicalAnd, [this]() { arithmetic_run_ = "ElementLogicalAnd"; }},
    {PrimitiveType_LogicalOr, [this]() { arithmetic_run_ = "ElementLogicalOr"; }},
    {PrimitiveType_Maximum, [this]() { arithmetic_run_ = "ElementMaximum"; }},
    {PrimitiveType_Minimum, [this]() { arithmetic_run_ = "ElementMinimum"; }},
    {PrimitiveType_FloorDiv, [this]() { arithmetic_run_ = "ElementFloorDiv"; }},
    {PrimitiveType_FloorMod, [this]() { arithmetic_run_ = "ElementFloorMod"; }},
    {PrimitiveType_Equal, [this]() { arithmetic_run_ = "ElementEqual"; }},
    {PrimitiveType_NotEqual, [this]() { arithmetic_run_ = "ElementNotEqual"; }},
    {PrimitiveType_Less, [this]() { arithmetic_run_ = "ElementLess"; }},
    {PrimitiveType_LessEqual, [this]() { arithmetic_run_ = "ElementLessEqual"; }},
    {PrimitiveType_Greater, [this]() { arithmetic_run_ = "ElementGreater"; }},
    {PrimitiveType_GreaterEqual, [this]() { arithmetic_run_ = "ElementGreaterEqual"; }},
    {PrimitiveType_SquaredDifference, [this]() { arithmetic_run_ = "ElementSquaredDifference"; }},
  };
  auto iter = type_setters.find(parameter_->type_);
  if (iter != type_setters.end()) {
    iter->second();
  } else {
    // Log the numeric operator type; the original streamed the raw
    // parameter_ pointer, which is useless in a log.
    MS_LOG(ERROR) << "Error Operator type " << parameter_->type_;
    arithmetic_run_ = "NULL";
    return RET_ERROR;
  }
  // Fixed typo in the original message ("arothmetic").
  MS_CHECK_RET_CODE(Init(context), "do arithmetic code failed!");
  return RET_OK;
}
|
||||
|
||||
// Emits the arithmetic kernel call. Three paths: generic broadcasting via
// BroadcastRun, the scalar-optimized "ElementOpt*" kernels, or a plain
// element-wise call when shapes already match.
int ArithmeticFP32Coder::DoCode(CoderContext *const context) {
  // Generated code is single-threaded for now; only slice 0 is emitted.
  int task_id = 0;
  if (arithmetic_parameter_->broadcasting_) {
    outside_ = 1;
    // Scan dimensions from innermost outwards: break_pos_ is the last
    // dimension where the input shapes differ; outside_ accumulates the
    // product of the matching trailing dimensions.
    // Use a signed index: ndim_ may be an unsigned type, and `i >= 0` on an
    // unsigned loop variable would never terminate if no dimension differed.
    for (int i = static_cast<int>(arithmetic_parameter_->ndim_) - 1; i >= 0; --i) {
      if (arithmetic_parameter_->in_shape0_[i] != arithmetic_parameter_->in_shape1_[i]) {
        break_pos_ = i;
        break;
      }
      outside_ *= arithmetic_parameter_->out_shape_[i];
    }
    ComputeStrides(arithmetic_parameter_->in_shape0_, arithmetic_parameter_->in_strides0_,
                   arithmetic_parameter_->ndim_);
    ComputeStrides(arithmetic_parameter_->in_shape1_, arithmetic_parameter_->in_strides1_,
                   arithmetic_parameter_->ndim_);
    ComputeStrides(arithmetic_parameter_->out_shape_, arithmetic_parameter_->out_strides_,
                   arithmetic_parameter_->ndim_);
  }

  int element_num = output_tensor_->ElementsNum();
  MS_CHECK_TRUE(thread_num_ > 0, "thread_num_ <= 0");
  int stride = UP_DIV(element_num, thread_num_);
  int count = MSMIN(stride, element_num - stride * task_id);
  MS_CHECK_TRUE(!arithmetic_run_.empty(), "arithmetic_run function is nullptr!");
  nnacl::NNaclFp32Serializer code;
  /**
   * nnacl combines all arithmetic operators into nnacl/arithmetic.c; that is
   * unsuitable for micro because of generated-package size, so the common
   * kernels are pulled in from dedicated slim sources where available.
   */
  if (arithmetic_opt_run_ == "ElementOptSub" || arithmetic_run_ == "ElementSub") {
    Collect(context, {"nnacl/kernel/fp32/sub.h"}, {"sub.c"});
  } else if (arithmetic_opt_run_ == "ElementOptAdd" || arithmetic_run_ == "ElementAdd") {
    Collect(context, {"nnacl/kernel/fp32/add_fp32_slim.h"}, {"add_fp32_slim.c"});
  } else if (arithmetic_opt_run_ == "ElementOptMul" || arithmetic_run_ == "ElementMul") {
    Collect(context, {"nnacl/kernel/fp32/mul.h"}, {"mul.c"});
  } else if (arithmetic_run_ == "ElementAddRelu") {
    Collect(context, {"nnacl/kernel/fp32/add_relu.h"}, {"add_relu.c"});
  } else {
    Collect(context, {"nnacl/arithmetic_common.h", "nnacl/fp32/arithmetic.h"}, {"arithmetic_common.c", "arithmetic.c"});
  }

  if (arithmetic_parameter_->broadcasting_) {
    // Generic broadcast path: emit a recursive expansion over the outer dims.
    stride = UP_DIV(outside_, thread_num_);
    out_count_ = MSMIN(stride, outside_ - stride * task_id);
    out_thread_stride_ = stride * task_id;
    std::string input0_str = allocator_->GetRuntimeAddr(input_tensor_);
    std::string input1_str = allocator_->GetRuntimeAddr(filter_tensor_);
    std::string output_str = allocator_->GetRuntimeAddr(output_tensor_);
    MS_CHECK_RET_CODE(BroadcastRun(input0_str, input1_str, output_str, 0, out_count_, out_thread_stride_, &code),
                      "do broad cast code failed!");
  } else if (!arithmetic_opt_run_.empty()) {
    // Scalar-optimized path: exactly one operand must be a scalar.
    code.CodeStruct("arithmetic_parameter", *arithmetic_parameter_);
    if (arithmetic_parameter_->in_elements_num0_ == 1) {
      if (data_type_ == kDataTypeFloat) {
        code.CodeFunction(arithmetic_opt_run_, input_tensor_, filter_tensor_, output_tensor_, count,
                          "&arithmetic_parameter");
      } else {
        code.CodeFunction(arithmetic_opt_run_int_, input_tensor_, filter_tensor_, output_tensor_, count,
                          "&arithmetic_parameter");
      }
    } else if (arithmetic_parameter_->in_elements_num1_ == 1) {
      if (data_type_ == kDataTypeFloat) {
        code.CodeFunction(arithmetic_opt_run_, input_tensor_, filter_tensor_, output_tensor_, count,
                          "&arithmetic_parameter");
      } else {
        code.CodeFunction(arithmetic_opt_run_int_, input_tensor_, filter_tensor_, output_tensor_, count,
                          "&arithmetic_parameter");
      }
    } else {
      MS_LOG(ERROR) << "arithmetic opt code run: at least one of inputs is scalar";
      return RET_ERROR;
    }
  } else {
    // Plain element-wise path: shapes match, no broadcasting required.
    if (data_type_ == kDataTypeFloat) {
      code.CodeFunction(arithmetic_run_, input_tensor_, filter_tensor_, output_tensor_, count);
    } else {
      code.CodeFunction(arithmetic_run_int_, input_tensor_, filter_tensor_, output_tensor_, count);
    }
  }
  MS_LOG(DEBUG) << "ArithmeticFP32Code has been called";
  context->AppendCode(code.str());

  return RET_OK;
}
|
||||
|
||||
// Register this coder for every binary arithmetic primitive it supports.
// Note that Add is registered for both int32 and float32 inputs.
REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt32, PrimitiveType_Add, CPUOpCoderCreator<ArithmeticFP32Coder>)

REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Mul, CPUOpCoderCreator<ArithmeticFP32Coder>)

REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Add, CPUOpCoderCreator<ArithmeticFP32Coder>)

REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Sub, CPUOpCoderCreator<ArithmeticFP32Coder>)

REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Div, CPUOpCoderCreator<ArithmeticFP32Coder>)

REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_LogicalAnd, CPUOpCoderCreator<ArithmeticFP32Coder>)

REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_LogicalOr, CPUOpCoderCreator<ArithmeticFP32Coder>)

REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Maximum, CPUOpCoderCreator<ArithmeticFP32Coder>)

REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Minimum, CPUOpCoderCreator<ArithmeticFP32Coder>)

REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_FloorDiv, CPUOpCoderCreator<ArithmeticFP32Coder>)

REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_FloorMod, CPUOpCoderCreator<ArithmeticFP32Coder>)

REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_SquaredDifference,
                   CPUOpCoderCreator<ArithmeticFP32Coder>)

REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Equal, CPUOpCoderCreator<ArithmeticFP32Coder>)

REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_NotEqual, CPUOpCoderCreator<ArithmeticFP32Coder>)

REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Less, CPUOpCoderCreator<ArithmeticFP32Coder>)

REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_LessEqual, CPUOpCoderCreator<ArithmeticFP32Coder>)

REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Greater, CPUOpCoderCreator<ArithmeticFP32Coder>)

REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_GreaterEqual, CPUOpCoderCreator<ArithmeticFP32Coder>)

REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Eltwise, CPUOpCoderCreator<ArithmeticFP32Coder>)

}  // namespace mindspore::lite::micro
|
|
@ -0,0 +1,109 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MICRO_CODER_OPCODERS_FP32_ARITHMETIC_FP32_CODER_H_
#define MICRO_CODER_OPCODERS_FP32_ARITHMETIC_FP32_CODER_H_

#include <vector>
#include <string>
#include "micro/coder/opcoders/op_coder.h"
#include "nnacl/fp32/arithmetic_fp32.h"
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
// Capacity of the fixed-size shape/stride arrays in ArithmeticParameter.
#define DEFAULT_ARITHMETIC_NDIMS 10
namespace mindspore::lite::micro {

// Primitive types handled by ArithmeticFP32Coder (sorted; the original list
// declared PrimitiveType_Minimum twice — the duplicate is removed).
using mindspore::schema::PrimitiveType_Add;
using mindspore::schema::PrimitiveType_Div;
using mindspore::schema::PrimitiveType_Eltwise;
using mindspore::schema::PrimitiveType_Equal;
using mindspore::schema::PrimitiveType_FloorDiv;
using mindspore::schema::PrimitiveType_FloorMod;
using mindspore::schema::PrimitiveType_Greater;
using mindspore::schema::PrimitiveType_GreaterEqual;
using mindspore::schema::PrimitiveType_Less;
using mindspore::schema::PrimitiveType_LessEqual;
using mindspore::schema::PrimitiveType_LogicalAnd;
using mindspore::schema::PrimitiveType_LogicalOr;
using mindspore::schema::PrimitiveType_Maximum;
using mindspore::schema::PrimitiveType_Minimum;
using mindspore::schema::PrimitiveType_Mul;
using mindspore::schema::PrimitiveType_NotEqual;
using mindspore::schema::PrimitiveType_RealDiv;
using mindspore::schema::PrimitiveType_SquaredDifference;
using mindspore::schema::PrimitiveType_Sub;

// Generates C code for binary element-wise float32/int32 arithmetic
// operators, including broadcasting and fused relu/relu6 activations.
class ArithmeticFP32Coder final : public OperatorCoder {
 public:
  ArithmeticFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
                      const Model::Node *node, size_t node_index, Target target)
      : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}

  ~ArithmeticFP32Coder() override = default;

  int Prepare(CoderContext *const context) override;

  int DoCode(CoderContext *const context) override;

 private:
  // Fills shapes/element counts and picks scalar-optimized kernels.
  int Init(CoderContext *const context);

  // Recursively emits broadcasted element-wise code (see .cc for details).
  int BroadcastRun(const std::string &input0, const std::string &input1, const std::string &output, int dim,
                   int out_count, int out_thread_stride, nnacl::NNaclFp32Serializer *const code);

  // Last dimension where the two input shapes differ (broadcast boundary).
  int break_pos_{0};

  // Product of the matching trailing output dimensions.
  int outside_{0};

  int out_thread_stride_{0};

  int out_count_{0};

  ArithmeticParameter *arithmetic_parameter_{nullptr};

  // Second operand (input index kWeightIndex).
  Tensor *filter_tensor_{nullptr};

  // Names of the nnacl kernels the generated code will call.
  std::string arithmetic_run_;

  std::string arithmetic_run_int_;

  std::string arithmetic_opt_run_;

  std::string arithmetic_opt_run_int_;

  LiteDataType data_type_{kDataTypeFloat};
};
}  // namespace mindspore::lite::micro
#endif  // MICRO_CODER_OPCODERS_FP32_ARITHMETIC_FP32_CODER_H_
|
|
@ -0,0 +1,104 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "micro/coder/opcoders/nnacl/fp32/arithmetic_self_fp32_coder.h"
#include <string>
#include <map>
#include <functional>  // std::function — used by Prepare's type_setters map
#include "nnacl/fp32/arithmetic_fp32.h"
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
#include "micro/coder/opcoders/file_collector.h"

namespace mindspore::lite::micro {

// Recomputes the element count and the per-thread slice size used by the
// generated unary-arithmetic kernel call.
int ArithmeticSelfFP32Coder::ReSize() {
  data_size_ = input_tensor_->ElementsNum();
  thread_sz_count_ = MSMIN(thread_num_, static_cast<int>(data_size_));
  MS_CHECK_TRUE(thread_sz_count_ > 0, "thread_sz_count_ <= 0");
  thread_sz_stride_ = UP_DIV(data_size_, thread_sz_count_);
  return RET_OK;
}
|
||||
|
||||
int ArithmeticSelfFP32Coder::Prepare(CoderContext *const context) {
|
||||
if (parameter_ == nullptr) {
|
||||
return RET_ERROR;
|
||||
}
|
||||
std::map<int, std::function<void()>> type_setters = {
|
||||
{PrimitiveType_Abs, [this]() { arithmetic_self_run_ = "ElementAbs"; }},
|
||||
{PrimitiveType_Cos, [this]() { arithmetic_self_run_ = "ElementCos"; }},
|
||||
{PrimitiveType_Log, [this]() { arithmetic_self_run_ = "ElementLog"; }},
|
||||
{PrimitiveType_Square, [this]() { arithmetic_self_run_ = "ElementSquare"; }},
|
||||
{PrimitiveType_Sqrt, [this]() { arithmetic_self_run_ = "ElementSqrt"; }},
|
||||
{PrimitiveType_Rsqrt, [this]() { arithmetic_self_run_ = "ElementRsqrt"; }},
|
||||
{PrimitiveType_Sin, [this]() { arithmetic_self_run_ = "ElementSin"; }},
|
||||
{PrimitiveType_LogicalNot, [this]() { arithmetic_self_run_ = "ElementLogicalNot"; }},
|
||||
{PrimitiveType_Floor, [this]() { arithmetic_self_run_ = "ElementFloor"; }},
|
||||
{PrimitiveType_Ceil, [this]() { arithmetic_self_run_ = "ElementCeil"; }},
|
||||
{PrimitiveType_Round, [this]() { arithmetic_self_run_ = "ElementRound"; }},
|
||||
{PrimitiveType_Neg, [this]() { arithmetic_self_run_ = "ElementNegative"; }},
|
||||
};
|
||||
auto iter = type_setters.find(parameter_->type_);
|
||||
if (iter != type_setters.end()) {
|
||||
iter->second();
|
||||
} else {
|
||||
MS_LOG(ERROR) << "Error Operator type " << parameter_;
|
||||
return RET_ERROR;
|
||||
}
|
||||
MS_CHECK_RET_CODE(ReSize(), "ReSize failed");
|
||||
return RET_OK;
|
||||
}
|
||||
|
||||
int ArithmeticSelfFP32Coder::DoCode(CoderContext *const context) {
|
||||
int task_id = 0;
|
||||
int size = MSMIN(thread_sz_stride_, static_cast<int>(data_size_ - task_id * thread_sz_stride_));
|
||||
|
||||
MS_CHECK_TRUE(!arithmetic_self_run_.empty(), "arithmetic_run function is nullptr!");
|
||||
|
||||
Collect(context, {"nnacl/arithmetic_common.h", "nnacl/fp32/arithmetic_self.h"}, {"nnacl/fp32/arithmetic_self.c"});
|
||||
nnacl::NNaclFp32Serializer code;
|
||||
code.CodeFunction(arithmetic_self_run_, input_tensor_, output_tensor_, size);
|
||||
|
||||
MS_LOG(DEBUG) << "ArithmeticSelfFP32Coder has been called";
|
||||
context->AppendCode(code.str());
|
||||
|
||||
return RET_OK;
|
||||
}
|
||||
|
||||
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Abs, CPUOpCoderCreator<ArithmeticSelfFP32Coder>)
|
||||
|
||||
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Cos, CPUOpCoderCreator<ArithmeticSelfFP32Coder>)
|
||||
|
||||
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Log, CPUOpCoderCreator<ArithmeticSelfFP32Coder>)
|
||||
|
||||
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Square, CPUOpCoderCreator<ArithmeticSelfFP32Coder>)
|
||||
|
||||
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Sqrt, CPUOpCoderCreator<ArithmeticSelfFP32Coder>)
|
||||
|
||||
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Rsqrt, CPUOpCoderCreator<ArithmeticSelfFP32Coder>)
|
||||
|
||||
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Sin, CPUOpCoderCreator<ArithmeticSelfFP32Coder>)
|
||||
|
||||
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_LogicalNot,
|
||||
CPUOpCoderCreator<ArithmeticSelfFP32Coder>)
|
||||
|
||||
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Floor, CPUOpCoderCreator<ArithmeticSelfFP32Coder>)
|
||||
|
||||
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Ceil, CPUOpCoderCreator<ArithmeticSelfFP32Coder>)
|
||||
|
||||
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Round, CPUOpCoderCreator<ArithmeticSelfFP32Coder>)
|
||||
|
||||
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Neg, CPUOpCoderCreator<ArithmeticSelfFP32Coder>)
|
||||
|
||||
} // namespace mindspore::lite::micro
|
|
@ -0,0 +1,109 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MICRO_CODER_OPCODERS_FP32_ARITHMETIC_SELF_FP32_CODER_H_
#define MICRO_CODER_OPCODERS_FP32_ARITHMETIC_SELF_FP32_CODER_H_

#include <string>
#include <vector>
#include "micro/coder/opcoders/op_coder.h"
#include "nnacl/fp32/arithmetic_self_fp32.h"
#include "nnacl/arithmetic_self_parameter.h"

namespace mindspore::lite::micro {

// Primitive-type aliases used by the micro arithmetic coders.
// NOTE(review): many of these (Add, AddN, Div, Equal, ...) are not referenced
// by the class below — presumably shared with binary-arithmetic coders;
// confirm before pruning.
using mindspore::schema::PrimitiveType_Abs;
using mindspore::schema::PrimitiveType_Add;
using mindspore::schema::PrimitiveType_AddN;
using mindspore::schema::PrimitiveType_Neg;
using mindspore::schema::PrimitiveType_Ceil;
using mindspore::schema::PrimitiveType_Cos;
using mindspore::schema::PrimitiveType_Div;
using mindspore::schema::PrimitiveType_Equal;
using mindspore::schema::PrimitiveType_Floor;
using mindspore::schema::PrimitiveType_FloorDiv;
using mindspore::schema::PrimitiveType_FloorMod;
using mindspore::schema::PrimitiveType_Greater;
using mindspore::schema::PrimitiveType_GreaterEqual;
using mindspore::schema::PrimitiveType_Less;
using mindspore::schema::PrimitiveType_LessEqual;
using mindspore::schema::PrimitiveType_Log;
using mindspore::schema::PrimitiveType_LogicalAnd;
using mindspore::schema::PrimitiveType_LogicalOr;
using mindspore::schema::PrimitiveType_LogicalNot;
using mindspore::schema::PrimitiveType_Maximum;
using mindspore::schema::PrimitiveType_Minimum;
using mindspore::schema::PrimitiveType_Mul;
using mindspore::schema::PrimitiveType_NotEqual;
using mindspore::schema::PrimitiveType_RealDiv;
using mindspore::schema::PrimitiveType_Round;
using mindspore::schema::PrimitiveType_Rsqrt;
using mindspore::schema::PrimitiveType_Sqrt;
using mindspore::schema::PrimitiveType_SquaredDifference;
using mindspore::schema::PrimitiveType_Sub;
using mindspore::schema::PrimitiveType_Sin;
using mindspore::schema::PrimitiveType_Square;

// Generates inline C code for unary element-wise float32 operators
// (Abs, Cos, Log, Sqrt, ...). The concrete nnacl kernel name is selected in
// Prepare() and the call is emitted in DoCode().
class ArithmeticSelfFP32Coder final : public OperatorCoder {
 public:
  ArithmeticSelfFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
                          const Model::Node *node, size_t node_index, Target target)
      : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
  int Prepare(CoderContext *const context) override;
  int DoCode(CoderContext *const context) override;
  ~ArithmeticSelfFP32Coder() override = default;

 private:
  // Recomputes the per-thread work split from the input element count.
  int ReSize();

 private:
  int thread_sz_count_{0};           // number of worker slots actually used
  int thread_sz_stride_{0};          // elements handled per worker slot
  size_t data_size_{0};              // total element count of the input tensor
  std::string arithmetic_self_run_;  // nnacl kernel function name to emit
};
}  // namespace mindspore::lite::micro
#endif  // MICRO_CODER_OPCODERS_FP32_ARITHMETIC_SELF_FP32_CODER_H_
|
|
@ -0,0 +1,55 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "micro/coder/opcoders/nnacl/fp32/assign_add_fp32_coder.h"
|
||||
#include <string>
|
||||
#include "schema/inner/ops_generated.h"
|
||||
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
|
||||
|
||||
namespace mindspore::lite::micro {
|
||||
|
||||
using mindspore::schema::PrimitiveType_AssignAdd;
|
||||
|
||||
int AssignAddFP32Coder::Prepare(CoderContext *const context) { return RET_OK; }
|
||||
|
||||
int AssignAddFP32Coder::DoCode(CoderContext *const context) {
|
||||
MS_CHECK_TRUE(input_tensors_.size() == 2, "inputs size is not equal to two");
|
||||
Tensor *input0 = input_tensors_.at(0);
|
||||
Tensor *input1 = input_tensors_.at(1);
|
||||
if (input0->Size() != input1->Size()) {
|
||||
MS_LOG(ERROR) << "input0 size: " << input0->Size() << ", input1 size: " << input1->Size();
|
||||
return RET_ERROR;
|
||||
}
|
||||
|
||||
nnacl::NNaclFp32Serializer code;
|
||||
// Get Tensor Pointer
|
||||
std::string input0_str = allocator_->GetRuntimeAddr(input0);
|
||||
std::string input1_str = allocator_->GetRuntimeAddr(input1);
|
||||
size_t data_size = input0->Size();
|
||||
// assign add, just add input1'data to input0
|
||||
code << "\t\tfor (int i = 0; i < " << data_size << "; ++i) {\n";
|
||||
code << "\t\t\t(" << input0_str << ")[i] += (" << input1_str << ")[i];\n";
|
||||
code << "\t\t}\n";
|
||||
code.CodeFunction("memcpy", output_tensor_, input0_str, data_size);
|
||||
context->AppendCode(code.str());
|
||||
return RET_OK;
|
||||
}
|
||||
|
||||
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_AssignAdd, CPUOpCoderCreator<AssignAddFP32Coder>)
|
||||
|
||||
REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt32, PrimitiveType_AssignAdd, CPUOpCoderCreator<AssignAddFP32Coder>)
|
||||
|
||||
} // namespace mindspore::lite::micro
|
|
@ -0,0 +1,37 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_ASSIGN_ADD_FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_ASSIGN_ADD_FP32_CODER_H_

#include <vector>
#include "micro/coder/opcoders/op_coder.h"
#include "nnacl/base/tile_base.h"

namespace mindspore::lite::micro {
// Generates C code for the AssignAdd operator: accumulates the second input
// into the first in place, then copies the result into the output buffer.
class AssignAddFP32Coder : public OperatorCoder {
 public:
  AssignAddFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
                     const Model::Node *node, size_t node_index, Target target)
      : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
  ~AssignAddFP32Coder() override = default;

  // No preparation work; returns RET_OK unconditionally.
  int Prepare(CoderContext *const context) override;

  int DoCode(CoderContext *const context) override;
};
}  // namespace mindspore::lite::micro
#endif  // MINDSPORE_LITE_MICRO_CODER_OPCODERS_ASSIGN_ADD_FP32_CODER_H_
|
|
@ -0,0 +1,69 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
#include "micro/coder/opcoders/nnacl/fp32/batchnorm_fp32_coder.h"
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include "nnacl/fp32/batchnorm_fp32.h"
|
||||
#include "src/ops/batch_norm.h"
|
||||
#include "nnacl/op_base.h"
|
||||
#include "micro/coder/opcoders/file_collector.h"
|
||||
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
|
||||
|
||||
using mindspore::schema::PrimitiveType_BatchNorm;
|
||||
|
||||
namespace mindspore::lite::micro {

// Fills the BatchNormParameter from the primitive attributes and the input
// shape: channel_ is the last dimension, unit_ the product of all others
// (assumes channel-last layout — TODO confirm against the converter output).
int BatchnormFP32Coder::Init() {
  auto bn_parameter = reinterpret_cast<BatchNormParameter *>(parameter_);
  auto bn_prim = reinterpret_cast<const mindspore::lite::BatchNorm *>(OperatorCoder::primitive());
  bn_parameter->epsilon_ = bn_prim->GetEpsilon();

  std::vector<int> input_shapes = input_tensor_->shape();
  if (input_shapes.empty()) {
    return RET_ERROR;
  }
  int n_dim = static_cast<int>(input_shapes.size());
  bn_parameter->channel_ = input_shapes.at(n_dim - 1);
  bn_parameter->unit_ = 1;
  for (int i = 0; i < n_dim - 1; i++) {
    bn_parameter->unit_ *= input_shapes.at(i);
  }
  // Never use more threads than there are units of work.
  bn_parameter->op_parameter_.thread_num_ = MSMIN(bn_parameter->op_parameter_.thread_num_, bn_parameter->unit_);
  return RET_OK;
}

// Emits a call to the nnacl BatchNorm kernel with (data, mean, variance)
// inputs. Parameter setup is done lazily here (Prepare is a no-op).
int BatchnormFP32Coder::DoCode(CoderContext *const context) {
  // attribute
  int task_id = 0;  // single-task code generation
  auto bn_parameter = reinterpret_cast<BatchNormParameter *>(parameter_);
  if (Init() != RET_OK) {
    MS_LOG(ERROR) << "BatchnormFP32Coder Init error";
    return RET_ERROR;
  }
  // inputs: 0 = data, 1 = mean, 2 = variance
  MS_CHECK_TRUE(input_tensors_.size() == 3, "inputs size is not equal to three");
  Tensor *mean_tensor = input_tensors_.at(1);
  Tensor *var_tensor = input_tensors_.at(2);
  Collect(context, {"nnacl/fp32/batchnorm.h"}, {"nnacl/fp32/batchnorm.c"});
  nnacl::NNaclFp32Serializer code;
  // Serialize the parameter struct into the generated file as "bn_parameter".
  code.CodeStruct("bn_parameter", *bn_parameter);
  code.CodeFunction("BatchNorm", output_tensor_, input_tensor_, mean_tensor, var_tensor, task_id, "&bn_parameter");
  MS_LOG(INFO) << "BatchnormFP32Code has been called";
  context->AppendCode(code.str());
  return lite::RET_OK;
}

REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_BatchNorm, CPUOpCoderCreator<BatchnormFP32Coder>)
}  // namespace mindspore::lite::micro
|
|
@ -0,0 +1,43 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MICRO_CODER_OPCODERS_FP32_BATCHNORM_FP32_CODER_H_
#define MICRO_CODER_OPCODERS_FP32_BATCHNORM_FP32_CODER_H_

#include <vector>
#include "micro/coder/opcoders/op_coder.h"

namespace mindspore::lite::micro {

// Generates C code for the float32 BatchNorm operator by serializing the
// BatchNormParameter and emitting a call to the nnacl BatchNorm kernel.
class BatchnormFP32Coder final : public OperatorCoder {
 public:
  BatchnormFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
                     const Model::Node *node, size_t node_index, Target target)
      : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}

  ~BatchnormFP32Coder() override = default;

  // Parameter setup is deferred to DoCode (see Init); nothing to do here.
  int Prepare(CoderContext *const context) override { return RET_OK; }

  int DoCode(CoderContext *const context) override;

 private:
  // Fills the BatchNormParameter (epsilon, channel, unit) from the input shape.
  int Init();
};

}  // namespace mindspore::lite::micro

// Fix: the closing guard comment previously said MICRO_CODER_OPCODERS_FP32_CODER_H_,
// which does not match the #define above.
#endif  // MICRO_CODER_OPCODERS_FP32_BATCHNORM_FP32_CODER_H_
|
|
@ -0,0 +1,77 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
#include "micro/coder/opcoders/nnacl/fp32/concat_fp32_coder.h"
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
|
||||
#include "micro/coder/opcoders/file_collector.h"
|
||||
|
||||
using mindspore::schema::PrimitiveType_Concat;
|
||||
|
||||
namespace mindspore::lite::micro {

// Caches the ConcatParameter and resolves the (possibly negative) axis.
int ConcatFP32Coder::Prepare(CoderContext *const context) {
  concat_param_ = reinterpret_cast<ConcatParameter *>(parameter_);
  return ReSize();
}

// Normalizes a negative axis into [0, rank) using the first input's rank.
int ConcatFP32Coder::ReSize() {
  axis_ = concat_param_->axis_ >= 0 ? concat_param_->axis_
                                    : static_cast<int>(input_tensor_->shape().size()) + concat_param_->axis_;
  return RET_OK;
}

// Emits: an array of input buffer addresses, one shape array per input, the
// output shape array, and a call to the nnacl Concat kernel.
int ConcatFP32Coder::DoCode(CoderContext *const context) {
  Collect(context, {"nnacl/fp32/concat.h"}, {"nnacl/fp32/concat.c"});

  size_t input_num = input_tensors_.size();

  nnacl::NNaclFp32Serializer code;
  code << "\t\tvoid *inputs_addr[] = {";
  for (size_t i = 0; i < input_num; ++i) {
    code << allocator_->GetRuntimeAddr(input_tensors_.at(i)) << ", ";
  }
  code << "};\n";

  // `i` intentionally survives this loop: after it, i == input_num, so the
  // output shape below is emitted as shape_<input_num>.
  size_t i;
  for (i = 0; i < input_num; ++i) {
    code << "\t\tint shape_" << i << "[] = {";
    for (auto &shape : input_tensors_.at(i)->shape()) {
      code << shape << ", ";
    }
    code << "};\n";
  }

  code << "\t\tint shape_" << i << "[] = {";
  for (auto &shape : output_tensor_->shape()) {
    code << shape << ", ";
  }
  code << "};\n";

  // inputs_output_shape = the input shape arrays followed by the output's.
  code << "\t\tint *inputs_output_shape[] = {";
  for (i = 0; i <= input_num; ++i) {
    code << "shape_" << i << ", ";
  }
  code << "};\n";

  // task_id is fixed to 0: single-task code generation.
  code.CodeFunction("Concat", "inputs_addr", input_num, axis_, "inputs_output_shape", output_tensor_->shape().size(),
                    output_tensor_, 0, thread_num_);
  context->AppendCode(code.str());
  return RET_OK;
}

REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Concat, CPUOpCoderCreator<ConcatFP32Coder>)
}  // namespace mindspore::lite::micro
|
|
@ -0,0 +1,42 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_CONCAT_FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_CONCAT_FP32_CODER_H_

#include <vector>
#include "micro/coder/opcoders/op_coder.h"
#include "nnacl/concat_parameter.h"

namespace mindspore::lite::micro {
// Generates C code for the Concat operator over an arbitrary number of input
// tensors along a configurable (possibly negative) axis.
class ConcatFP32Coder : public OperatorCoder {
 public:
  ConcatFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
                  const Model::Node *node, size_t node_index, Target target)
      : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
  ~ConcatFP32Coder() override = default;

  int Prepare(CoderContext *const context) override;
  int DoCode(CoderContext *const context) override;

 private:
  // Resolves a possibly-negative axis against the first input's rank.
  int ReSize();

  int axis_{0};                             // normalized concatenation axis
  ConcatParameter *concat_param_{nullptr};  // borrowed from parameter_; not owned
};
}  // namespace mindspore::lite::micro
#endif  // MINDSPORE_LITE_MICRO_CODER_OPCODERS_CONCAT_FP32_CODER_H_
|
|
@ -0,0 +1,52 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "micro/coder/opcoders/nnacl/fp32/expand_dims_fp32_coder.h"
|
||||
#include <string>
|
||||
#include "micro/coder/opcoders/file_collector.h"
|
||||
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
|
||||
|
||||
using mindspore::schema::PrimitiveType_ExpandDims;
|
||||
|
||||
namespace mindspore::lite::micro {
|
||||
int ExpandDimsFP32Coder::Prepare(CoderContext *const context) { return ReSize(); }
|
||||
|
||||
int ExpandDimsFP32Coder::ReSize() {
|
||||
data_size_ = input_tensor_->ElementsNum();
|
||||
thread_sz_count_ = MSMIN(thread_num_, static_cast<int>(data_size_));
|
||||
MS_CHECK_TRUE(thread_sz_count_ > 0, "thread_sz_count_ is less or equal to 0");
|
||||
thread_sz_stride_ = UP_DIV(data_size_, thread_sz_count_);
|
||||
return RET_OK;
|
||||
}
|
||||
|
||||
int ExpandDimsFP32Coder::DoCode(CoderContext *const context) {
|
||||
// generate code .h .c
|
||||
Collect(context, {"nnacl/fp32/expandDims.h"}, {"nnacl/fp32/expandDims.c"});
|
||||
nnacl::NNaclFp32Serializer code;
|
||||
int task_id = 0;
|
||||
size_t size = MSMIN(thread_sz_stride_, static_cast<int>(data_size_ - task_id * thread_sz_stride_));
|
||||
if (!size) {
|
||||
return RET_OK;
|
||||
}
|
||||
code.CodeFunction("ExpandDims", input_tensor_, output_tensor_, size * sizeof(float));
|
||||
context->AppendCode(code.str());
|
||||
return RET_OK;
|
||||
}
|
||||
|
||||
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_ExpandDims, CPUOpCoderCreator<ExpandDimsFP32Coder>)
|
||||
|
||||
REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt32, PrimitiveType_ExpandDims, CPUOpCoderCreator<ExpandDimsFP32Coder>)
|
||||
} // namespace mindspore::lite::micro
|
|
@ -0,0 +1,42 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_EXPANDDIMS_FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_EXPANDDIMS_FP32_CODER_H_

#include <vector>
#include "micro/coder/opcoders/op_coder.h"

namespace mindspore::lite::micro {
// Generates C code for the ExpandDims operator: a flat data copy, since
// expanding dimensions only changes shape metadata.
class ExpandDimsFP32Coder : public OperatorCoder {
 public:
  ExpandDimsFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
                      const Model::Node *node, size_t node_index, Target target)
      : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}

  ~ExpandDimsFP32Coder() override = default;
  int Prepare(CoderContext *const context) override;

  int DoCode(CoderContext *const context) override;

 private:
  // Recomputes the per-thread work split from the input element count.
  int ReSize();
  int thread_sz_count_{0};   // number of worker slots actually used
  int thread_sz_stride_{0};  // elements handled per worker slot
  size_t data_size_{0};      // total element count of the input tensor
};
}  // namespace mindspore::lite::micro
#endif  // MINDSPORE_LITE_MICRO_CODER_OPCODERS_EXPANDDIMS_FP32_CODER_H_
|
|
@ -0,0 +1,69 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "micro/coder/opcoders/nnacl/fp32/gather_fp32_coder.h"
|
||||
#include <string>
|
||||
#include "nnacl/gather_parameter.h"
|
||||
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
|
||||
#include "micro/coder/log.h"
|
||||
#include "micro/coder/opcoders/file_collector.h"
|
||||
|
||||
using mindspore::schema::PrimitiveType_Gather;
|
||||
|
||||
namespace mindspore::lite::micro {
|
||||
|
||||
int GatherFP32Coder::Prepare(CoderContext *const context) { return RET_OK; }
|
||||
|
||||
int GatherFP32Coder::DoCode(CoderContext *context) {
|
||||
Tensor *input0 = input_tensors_.at(0);
|
||||
Tensor *input1 = input_tensors_.at(1);
|
||||
|
||||
// generate code .h .c
|
||||
Collect(context, {"nnacl/fp32/gather.h"}, {"nnacl/fp32/gather.c"});
|
||||
|
||||
nnacl::NNaclFp32Serializer code;
|
||||
std::vector<int> in_shape = input0->shape();
|
||||
int in_rank = in_shape.size();
|
||||
int indices_element_size = input1->ElementsNum();
|
||||
int axis = (reinterpret_cast<GatherParameter *>(parameter_))->axis_;
|
||||
MS_CHECK_TRUE(static_cast<int>(in_shape.size()) >= axis, "invalid axis in gather parameter");
|
||||
const int limit = in_shape.at(axis);
|
||||
|
||||
int outer_size = 1, inner_size = 1;
|
||||
for (int i = 0; i < axis; ++i) {
|
||||
outer_size *= in_shape.at(i);
|
||||
}
|
||||
for (int i = axis + 1; i < in_rank; ++i) {
|
||||
inner_size *= in_shape.at(i);
|
||||
}
|
||||
int task_id = 0;
|
||||
MS_CHECK_TRUE(thread_num_ > 0, "thread_num_ <= 0");
|
||||
int stride = UP_DIV(outer_size, thread_num_);
|
||||
int count = MSMIN(stride, outer_size - stride * task_id);
|
||||
|
||||
// call the op function
|
||||
if (input0->data_type() == kNumberTypeInt32) {
|
||||
code.CodeFunction("GatherInt32", input0, count, inner_size, limit, input1, indices_element_size, output_tensor_);
|
||||
} else {
|
||||
code.CodeFunction("Gather", input0, count, inner_size, limit, input1, indices_element_size, output_tensor_);
|
||||
}
|
||||
context->AppendCode(code.str());
|
||||
return RET_OK;
|
||||
}
|
||||
|
||||
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Gather, CPUOpCoderCreator<GatherFP32Coder>)
|
||||
|
||||
} // namespace mindspore::lite::micro
|
|
@ -0,0 +1,41 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_GATHER_FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_GATHER_FP32_CODER_H_

#include <vector>
#include "micro/coder/opcoders/op_coder.h"
#include "nnacl/base/tile_base.h"

namespace mindspore::lite::micro {
// Generates C code for the Gather operator: selects slices of the data input
// along a configurable axis using the indices input.
class GatherFP32Coder : public OperatorCoder {
 public:
  GatherFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
                  const Model::Node *node, size_t node_index, Target target)
      : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}

  ~GatherFP32Coder() override = default;

  int Prepare(CoderContext *const context) override;

  int DoCode(CoderContext *const context) override;

 private:
  // NOTE(review): indices_ is never referenced in the visible implementation
  // — presumably a leftover from a runtime-kernel port; confirm before removing.
  int32_t *indices_{nullptr};
};
}  // namespace mindspore::lite::micro
#endif  // MINDSPORE_LITE_MICRO_CODER_OPCODERS_GATHER_FP32_CODER_H_
|
|
@ -0,0 +1,51 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "micro/coder/opcoders/nnacl/fp32/nchw2nhwc_fp32_coder.h"
|
||||
#include <vector>
|
||||
#include <string>
|
||||
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
|
||||
#include "micro/coder/opcoders/file_collector.h"
|
||||
|
||||
using mindspore::schema::PrimitiveType_Nchw2Nhwc;
|
||||
|
||||
namespace mindspore::lite::micro {
// No preparation required; code generation happens entirely in DoCode.
int Nchw2NhwcFP32Coder::Prepare(CoderContext *const context) { return RET_OK; }

// Emits an NCHW->NHWC layout transform for 4-D tensors, or a plain copy for
// other ranks (where the two layouts are byte-identical).
int Nchw2NhwcFP32Coder::DoCode(CoderContext *context) {
  // generate code .h .c
  Collect(context, {"nnacl/pack.h"}, {"nnacl/pack.c"});
  nnacl::NNaclFp32Serializer code;
  if (input_tensor_->shape().size() == 4) {
    if (input_tensor_->data_type() == kNumberTypeFloat32) {
      code.CodeFunction("PackNCHWToNHWCFp32", input_tensor_, output_tensor_, output_tensor_->Batch(),
                        output_tensor_->Height() * output_tensor_->Width(), output_tensor_->Channel());
    } else if (input_tensor_->data_type() == kNumberTypeInt8) {
      code.CodeFunction("PackNCHWToNHWCInt8", input_tensor_, output_tensor_, output_tensor_->Batch(),
                        output_tensor_->Height() * output_tensor_->Width(), output_tensor_->Channel());
    } else {
      // NOTE(review): this logs and falls through, emitting no transform for
      // the unsupported type but still returning RET_OK — consider returning
      // RET_ERROR here; confirm intended behavior.
      MS_LOG(ERROR) << "unsupported format transform";
    }
  } else {
    // NOTE(review): sizeof(float) is hard-coded; for a non-float tensor this
    // would copy the wrong byte count — presumably safe because this coder is
    // registered for kNumberTypeFloat32 only, but verify the int8 4-D branch
    // above cannot reach here.
    code.CodeFunction("memcpy", output_tensor_, input_tensor_, input_tensor_->ElementsNum() * sizeof(float));
  }

  context->AppendCode(code.str());
  return RET_OK;
}

REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Nchw2Nhwc, CPUOpCoderCreator<Nchw2NhwcFP32Coder>)
}  // namespace mindspore::lite::micro
|
|
@ -0,0 +1,38 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_NCHW2FP32_CODER_H_
|
||||
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_NCHW2FP32_CODER_H_
|
||||
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include "micro/coder/opcoders/op_coder.h"
|
||||
#include "nnacl/base/tile_base.h"
|
||||
|
||||
namespace mindspore::lite::micro {
|
||||
// Op coder emitting C source for the NCHW→NHWC layout transform.
class Nchw2NhwcFP32Coder : public OperatorCoder {
 public:
  Nchw2NhwcFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
                     const Model::Node *node, size_t node_index, Target target)
      : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}

  ~Nchw2NhwcFP32Coder() override = default;
  // No precomputation required for this transform.
  int Prepare(CoderContext *const context) override;

  // Emits the layout-transform call into the generated code.
  int DoCode(CoderContext *const context) override;
};
|
||||
} // namespace mindspore::lite::micro
|
||||
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_NCHW2FP32_CODER_H_
|
|
@ -0,0 +1,50 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "micro/coder/opcoders/nnacl/fp32/nhwc2nchw_fp32_coder.h"
|
||||
#include <string>
|
||||
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
|
||||
#include "micro/coder/opcoders/file_collector.h"
|
||||
|
||||
using mindspore::schema::PrimitiveType_Nhwc2Nchw;
|
||||
namespace mindspore::lite::micro {
|
||||
// NHWC→NCHW needs no precomputation; all work happens in DoCode().
int Nhwc2NchwFP32Coder::Prepare(CoderContext *const context) {
  return RET_OK;
}
|
||||
|
||||
// Emits C code converting a 4-D tensor from NHWC to NCHW layout.
// Non-4D inputs are emitted as a raw copy (the layouts coincide then).
int Nhwc2NchwFP32Coder::DoCode(CoderContext *const context) {
  // Fix: the source file was listed as "pack.c"; use the full relative path
  // "nnacl/pack.c", consistent with Nchw2NhwcFP32Coder::DoCode.
  Collect(context, {"nnacl/pack.h"}, {"nnacl/pack.c"});

  nnacl::NNaclFp32Serializer code;
  if (input_tensor_->shape().size() == 4) {
    if (input_tensor_->data_type() == kNumberTypeFloat32) {
      code.CodeFunction("PackNHWCToNCHWFp32", input_tensor_, output_tensor_, output_tensor_->Batch(),
                        output_tensor_->Height() * output_tensor_->Width(), output_tensor_->Channel());
    } else if (input_tensor_->data_type() == kNumberTypeInt8) {
      code.CodeFunction("PackNHWCToNCHWInt8", input_tensor_, output_tensor_, output_tensor_->Batch(),
                        output_tensor_->Height() * output_tensor_->Width(), output_tensor_->Channel());
    } else {
      // Fix: previously this only logged and still returned RET_OK with no
      // transform emitted; surface the unsupported data type as an error.
      MS_LOG(ERROR) << "unsupported format transform";
      return RET_ERROR;
    }
  } else {
    // NOTE(review): byte count assumes float elements even for non-fp32
    // inputs — confirm non-4D inputs are always float32 here.
    code.CodeFunction("memcpy", output_tensor_, input_tensor_, input_tensor_->ElementsNum() * sizeof(float));
  }

  context->AppendCode(code.str());
  return RET_OK;
}
|
||||
|
||||
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Nhwc2Nchw, CPUOpCoderCreator<Nhwc2NchwFP32Coder>)
|
||||
} // namespace mindspore::lite::micro
|
|
@ -0,0 +1,37 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_NHWC2NCHW_FP32_CODER_H_
|
||||
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_NHWC2NCHW_FP32_CODER_H_
|
||||
|
||||
#include <vector>
|
||||
#include "micro/coder/opcoders/op_coder.h"
|
||||
#include "nnacl/base/tile_base.h"
|
||||
|
||||
namespace mindspore::lite::micro {
|
||||
// Op coder emitting C source for the NHWC→NCHW layout transform.
class Nhwc2NchwFP32Coder : public OperatorCoder {
 public:
  Nhwc2NchwFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
                     const Model::Node *node, size_t node_index, Target target)
      : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
  ~Nhwc2NchwFP32Coder() override = default;

  // No precomputation required for this transform.
  int Prepare(CoderContext *const context) override;

  // Emits the layout-transform call into the generated code.
  int DoCode(CoderContext *const context) override;
};
|
||||
} // namespace mindspore::lite::micro
|
||||
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_NHWC2NCHW_FP32_CODER_H_
|
|
@ -0,0 +1,103 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "micro/coder/opcoders/nnacl/fp32/pad_fp32_coder.h"
#include <cmath>
#include <string>
#include <vector>
#include "micro/coder/log.h"
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
#include "micro/coder/opcoders/file_collector.h"
|
||||
|
||||
using mindspore::schema::PrimitiveType_Pad;
|
||||
|
||||
namespace mindspore::lite::micro {
|
||||
|
||||
// Caches this node's PadParameter and normalizes shapes/paddings via ReSize().
int PadFP32Coder::Prepare(CoderContext *const context) {
  pad_param_ = reinterpret_cast<PadParameter *>(parameter_);
  return ReSize();
}
|
||||
|
||||
// Normalizes input/output shapes and paddings to fixed-size working arrays.
// Only PaddingMode_CONSTANT gets the extension; other modes are left as-is.
int PadFP32Coder::ReSize() {
  size_t rank = input_tensor_->shape().size();
  if (rank > DEFAULT_PAD_NDIMS) {
    MS_LOG(ERROR) << "Pad input rank should <= " << DEFAULT_PAD_NDIMS << ", got " << rank;
    return RET_ERROR;
  }
  if (pad_param_->pad_mode_ == static_cast<int>(schema::PaddingMode_CONSTANT)) {
    // Left-extend both shapes with leading 1s to exactly DEFAULT_PAD_NDIMS dims.
    MS_CHECK_RET_CODE(ExtendShape(in_, DEFAULT_PAD_NDIMS, input_tensor_->shape().data(), rank),
                      "ExtendShape input error");
    MS_CHECK_RET_CODE(ExtendShape(out_, DEFAULT_PAD_NDIMS, output_tensor_->shape().data(), rank),
                      "ExtendShape output error");
    if (pad_param_->padding_length < MAX_PAD_SIZE) {
      // paddings_ is extended in place, so copy the originals aside first,
      // then left-fill with zeros up to MAX_PAD_SIZE entries.
      int ori_paddings[MAX_PAD_SIZE];
      for (int i = 0; i < pad_param_->padding_length; ++i) {
        ori_paddings[i] = pad_param_->paddings_[i];
      }
      MS_CHECK_RET_CODE(ExtendPaddings(pad_param_->paddings_, MAX_PAD_SIZE, ori_paddings, pad_param_->padding_length),
                        "Extendpadding error");
      pad_param_->padding_length = MAX_PAD_SIZE;
    }
  }
  return RET_OK;
}
|
||||
|
||||
int PadFP32Coder::ExtendShape(int *shape, int length, const int *ori_shape, int rank) {
|
||||
MS_CHECK_PTR(shape);
|
||||
MS_CHECK_PTR(ori_shape);
|
||||
for (int i = 0; i < length - rank; ++i) {
|
||||
shape[i] = 1;
|
||||
}
|
||||
for (int i = length - rank; i < length; ++i) {
|
||||
shape[i] = ori_shape[i - (length - rank)];
|
||||
}
|
||||
return RET_OK;
|
||||
}
|
||||
|
||||
int PadFP32Coder::ExtendPaddings(int *paddings, int length, const int *ori_paddings, int ori_length) {
|
||||
MS_CHECK_PTR(paddings);
|
||||
MS_CHECK_PTR(ori_paddings);
|
||||
for (int i = 0; i < length - ori_length; ++i) {
|
||||
paddings[i] = 0;
|
||||
}
|
||||
for (int i = length - ori_length; i < length; ++i) {
|
||||
paddings[i] = ori_paddings[i - (length - ori_length)];
|
||||
}
|
||||
return RET_OK;
|
||||
}
|
||||
|
||||
// Emits C code performing constant padding of a float32 tensor.
int PadFP32Coder::DoCode(CoderContext *const context) {
  int task_id = thread_num_ - 1;
  Collect(context, {"nnacl/fp32/pad.h", "nnacl/pad_parameter.h"}, {"nnacl/fp32/pad.c"});

  nnacl::NNaclFp32Serializer code;
  code.CodeArray("in_", in_, DEFAULT_PAD_NDIMS);
  code.CodeArray("out_", out_, DEFAULT_PAD_NDIMS);
  code.CodeArray("padding_", pad_param_->paddings_, MAX_PAD_SIZE);

  int output_size = output_tensor_->ElementsNum();
  // Fix: the old test `constant_value_ - 0.0f < 1e-5` was true for ANY negative
  // constant, wrongly zero-filling the output. Compare the magnitude instead.
  if (std::fabs(pad_param_->constant_value_) < 1e-5f) {
    // Constant is (approximately) zero: a plain memset is enough.
    code.CodeFunction("memset", output_tensor_, "0", output_size * sizeof(float));
  } else {
    // Pre-fill the output with the constant value.
    std::vector<float> constant_values(output_size, pad_param_->constant_value_);
    code.CodeArray("output_tensor_", constant_values.data(), output_size);
  }
  code.CodeFunction("Pad", input_tensor_, output_tensor_, "in_", "out_", "padding_", task_id, thread_num_);
  context->AppendCode(code.str());
  return RET_OK;
}
|
||||
|
||||
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Pad, CPUOpCoderCreator<PadFP32Coder>)
|
||||
} // namespace mindspore::lite::micro
|
|
@ -0,0 +1,49 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_PAD_FP32_CODER_H_
|
||||
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_PAD_FP32_CODER_H_
|
||||
|
||||
#include <vector>
|
||||
#include "micro/coder/opcoders/op_coder.h"
|
||||
#include "nnacl/fp32/pad_fp32.h"
|
||||
|
||||
namespace mindspore::lite::micro {
|
||||
// Op coder emitting C source for the float32 Pad operator (constant mode).
class PadFP32Coder : public OperatorCoder {
 public:
  PadFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
               const Model::Node *node, size_t node_index, Target target)
      : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}

  ~PadFP32Coder() override = default;

  // Caches PadParameter and normalizes shapes/paddings.
  int Prepare(CoderContext *const context) override;

  // Emits the padding code for this node.
  int DoCode(CoderContext *const context) override;

  // Re-derives the fixed-size shape/padding arrays from current tensor shapes.
  int ReSize();

 private:
  // Left-extends ori_shape with 1s into shape (length entries total).
  int ExtendShape(int *shape, int length, const int *ori_shape, int rank);
  // Left-extends ori_paddings with 0s into paddings (length entries total).
  int ExtendPaddings(int *paddings, int length, const int *ori_paddings, int ori_length);

 protected:
  PadParameter *pad_param_{nullptr};  // not owned; points into parameter_
  int in_[DEFAULT_PAD_NDIMS]{0};      // input shape, left-extended with 1s
  int out_[DEFAULT_PAD_NDIMS]{0};     // output shape, left-extended with 1s
};
|
||||
} // namespace mindspore::lite::micro
|
||||
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_PAD_FP32_CODER_H_
|
|
@ -0,0 +1,103 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
#include "micro/coder/opcoders/nnacl/fp32/pooling_fp32_coder.h"
|
||||
#include <cfloat>
|
||||
#include <string>
|
||||
#include "nnacl/fp32/pooling_fp32.h"
|
||||
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
|
||||
#include "micro/coder/log.h"
|
||||
#include "micro/coder/opcoders/file_collector.h"
|
||||
|
||||
using mindspore::schema::PrimitiveType_Pooling;
|
||||
|
||||
namespace mindspore::lite::micro {
|
||||
|
||||
int PoolingFP32Coder::DoCode(CoderContext *const context) {
|
||||
// attribute
|
||||
auto pooling_parameter = reinterpret_cast<PoolingParameter *>(parameter_);
|
||||
int task_id = 0;
|
||||
// init struct PoolingParameters
|
||||
pooling_parameter->input_batch_ = input_tensor_->Batch();
|
||||
pooling_parameter->input_channel_ = input_tensor_->Channel();
|
||||
pooling_parameter->input_h_ = input_tensor_->Height();
|
||||
pooling_parameter->input_w_ = input_tensor_->Width();
|
||||
pooling_parameter->output_batch_ = output_tensor_->Batch();
|
||||
pooling_parameter->output_channel_ = output_tensor_->Channel();
|
||||
pooling_parameter->output_h_ = output_tensor_->Height();
|
||||
pooling_parameter->output_w_ = output_tensor_->Width();
|
||||
|
||||
pooling_parameter->thread_num_ = pooling_parameter->op_parameter_.thread_num_;
|
||||
|
||||
nnacl::NNaclFp32Serializer code;
|
||||
code.CodeStruct("pooling_parameter", *pooling_parameter);
|
||||
float minf = -FLT_MAX;
|
||||
float maxf = FLT_MAX;
|
||||
if (pooling_parameter->pool_mode_ == PoolMode_MaxPool) {
|
||||
Collect(context, {"nnacl/kernel/fp32/max_pooling_fp32_slim.h"}, {"max_pooling_fp32_slim.c"});
|
||||
switch (pooling_parameter->act_type_) {
|
||||
case ActType_Relu: {
|
||||
minf = 0.f;
|
||||
break;
|
||||
}
|
||||
case ActType_Relu6: {
|
||||
minf = 0.f;
|
||||
maxf = 6.f;
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
MS_LOG(INFO) << "no actype";
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (thread_num_ > 1) {
|
||||
code.CodeBaseStruct("PoolingFp32Args", "args", input_tensor_, output_tensor_, "&pooling_parameter", minf, maxf);
|
||||
CODE_PARALLEL_FUNC("MaxPoolingFp32Run");
|
||||
} else {
|
||||
code.CodeFunction("MaxPooling", input_tensor_, output_tensor_, "&pooling_parameter", task_id, minf, maxf);
|
||||
}
|
||||
} else {
|
||||
Collect(context, {"nnacl/fp32/pooling.h"}, {"pooling.c"});
|
||||
switch (pooling_parameter->act_type_) {
|
||||
case ActType_Relu: {
|
||||
minf = 0.f;
|
||||
break;
|
||||
}
|
||||
case ActType_Relu6: {
|
||||
minf = 0.f;
|
||||
maxf = 6.f;
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
MS_LOG(INFO) << "no actype";
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (thread_num_ > 1) {
|
||||
code.CodeBaseStruct("PoolingFp32Args", "args", input_tensor_, output_tensor_, "&pooling_parameter", minf, maxf);
|
||||
CODE_PARALLEL_FUNC("AvgPoolingFp32Run");
|
||||
} else {
|
||||
code.CodeFunction("AvgPooling", input_tensor_, output_tensor_, "&pooling_parameter", task_id, minf, maxf);
|
||||
}
|
||||
}
|
||||
|
||||
MS_LOG(INFO) << "PoolingFp32Code has been called";
|
||||
context->AppendCode(code.str());
|
||||
return lite::RET_OK;
|
||||
}
|
||||
|
||||
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Pooling, CPUOpCoderCreator<PoolingFP32Coder>)
|
||||
} // namespace mindspore::lite::micro
|
|
@ -0,0 +1,39 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MICRO_CODER_OPCODERS_FP32_POOLFP32_CODER_H_
|
||||
#define MICRO_CODER_OPCODERS_FP32_POOLFP32_CODER_H_
|
||||
|
||||
#include <vector>
|
||||
#include "micro/coder/opcoders/op_coder.h"
|
||||
|
||||
namespace mindspore::lite::micro {
|
||||
|
||||
// Op coder emitting C source for float32 max/average pooling.
class PoolingFP32Coder final : public OperatorCoder {
 public:
  PoolingFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
                   const Model::Node *node, size_t node_index, Target target)
      : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
  ~PoolingFP32Coder() override = default;

  // No precomputation needed; parameters are derived from tensors in DoCode.
  int Prepare(CoderContext *const context) override { return RET_OK; }

  // Emits the pooling call (parallel or single-threaded) into the context.
  int DoCode(CoderContext *const context) override;
};
|
||||
|
||||
} // namespace mindspore::lite::micro
|
||||
|
||||
#endif  // MICRO_CODER_OPCODERS_FP32_POOLFP32_CODER_H_
|
|
@ -0,0 +1,60 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "micro/coder/opcoders/nnacl/fp32/power_fp32_coder.h"
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
|
||||
#include "micro/coder/opcoders/file_collector.h"
|
||||
|
||||
using mindspore::schema::PrimitiveType_Power;
|
||||
|
||||
namespace mindspore::lite::micro {
|
||||
|
||||
int PowerFP32Coder::DoCode(CoderContext *const context) {
|
||||
scale_ = reinterpret_cast<PowerParameter *>(parameter_)->scale_;
|
||||
shift_ = reinterpret_cast<PowerParameter *>(parameter_)->shift_;
|
||||
|
||||
Tensor *filter_tensor = input_tensors_.at(kWeightIndex);
|
||||
MS_CHECK_PTR(filter_tensor);
|
||||
int size = input_tensor_->ElementsNum();
|
||||
int task_id = 0;
|
||||
MS_CHECK_TRUE(thread_num_ > 0, "thread_num_ <= 0");
|
||||
int stride = UP_DIV(size, thread_num_);
|
||||
int len = MSMIN(stride, size - stride * task_id);
|
||||
std::string exp_addr;
|
||||
bool broadcast = true;
|
||||
if (input_tensors_.size() == 2) {
|
||||
exp_addr = allocator_->GetRuntimeAddr(filter_tensor);
|
||||
broadcast = !(input_tensor_->shape() == filter_tensor->shape());
|
||||
}
|
||||
std::string cur_exp_str;
|
||||
if (broadcast) {
|
||||
cur_exp_str = input_tensors_.size() == 2 ? exp_addr : "&power";
|
||||
} else {
|
||||
cur_exp_str = exp_addr;
|
||||
}
|
||||
// generate code .h .c
|
||||
Collect(context, {"nnacl/power.h"}, {"power.c"});
|
||||
nnacl::NNaclFp32Serializer code;
|
||||
code.CodeFunction("Power", input_tensor_, cur_exp_str, output_tensor_, len, scale_, shift_, broadcast);
|
||||
context->AppendCode(code.str());
|
||||
return RET_OK;
|
||||
}
|
||||
|
||||
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Power, CPUOpCoderCreator<PowerFP32Coder>)
|
||||
|
||||
} // namespace mindspore::lite::micro
|
|
@ -0,0 +1,42 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_POWER_FP32_CODER_H_
|
||||
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_POWER_FP32_CODER_H_
|
||||
|
||||
#include <vector>
|
||||
#include "micro/coder/opcoders/op_coder.h"
|
||||
#include "nnacl/power_parameter.h"
|
||||
|
||||
namespace mindspore::lite::micro {
|
||||
// Op coder emitting C source for the float32 Power operator.
class PowerFP32Coder : public OperatorCoder {
 public:
  PowerFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
                 const Model::Node *node, size_t node_index, Target target)
      : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}

  ~PowerFP32Coder() override = default;

  // No precomputation needed; scale/shift are read from the parameter in DoCode.
  int Prepare(CoderContext *const context) override { return RET_OK; }

  // Emits the Power call into the generated code.
  int DoCode(CoderContext *const context) override;

 private:
  float scale_{0.0f};  // multiplier applied before exponentiation
  float shift_{0.0f};  // offset applied before exponentiation
};
|
||||
} // namespace mindspore::lite::micro
|
||||
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_POWER_FP32_CODER_H_
|
|
@ -0,0 +1,39 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "micro/coder/opcoders/nnacl/fp32/reshape_fp32_coder.h"
|
||||
#include <string>
|
||||
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
|
||||
#include "micro/coder/opcoders/file_collector.h"
|
||||
|
||||
using mindspore::schema::PrimitiveType_Reshape;
|
||||
namespace mindspore::lite::micro {
|
||||
|
||||
int ReshapeFP32Coder::DoCode(CoderContext *const context) {
|
||||
size_t data_size = input_tensor_->Size();
|
||||
|
||||
Collect(context, {"nnacl/reshape.h"}, {"reshape.c"});
|
||||
|
||||
nnacl::NNaclFp32Serializer code;
|
||||
code.CodeFunction("Reshape", input_tensor_, output_tensor_, data_size);
|
||||
context->AppendCode(code.str());
|
||||
return RET_OK;
|
||||
}
|
||||
|
||||
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Reshape, CPUOpCoderCreator<ReshapeFP32Coder>)
|
||||
|
||||
REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt32, PrimitiveType_Reshape, CPUOpCoderCreator<ReshapeFP32Coder>)
|
||||
} // namespace mindspore::lite::micro
|
|
@ -0,0 +1,35 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_RESHAPE_FP32_CODER_H_
|
||||
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_RESHAPE_FP32_CODER_H_
|
||||
|
||||
#include <vector>
|
||||
#include "micro/coder/opcoders/op_coder.h"
|
||||
|
||||
namespace mindspore::lite::micro {
|
||||
// Op coder emitting C source for Reshape (registered for fp32 and int32).
class ReshapeFP32Coder : public OperatorCoder {
 public:
  ReshapeFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
                   const Model::Node *node, size_t node_index, Target target)
      : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
  ~ReshapeFP32Coder() override = default;
  // Reshape needs no preparation; it is a raw buffer copy at runtime.
  int Prepare(CoderContext *const context) override { return RET_OK; }

  int DoCode(CoderContext *const context) override;
};
|
||||
} // namespace mindspore::lite::micro
|
||||
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_RESHAPE_FP32_CODER_H_
|
|
@ -0,0 +1,164 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
#include "micro/coder/opcoders/nnacl/fp32/scale_fp32_coder.h"
|
||||
#include <string>
|
||||
#include "micro/coder/log.h"
|
||||
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
|
||||
#include "micro/coder/opcoders/file_collector.h"
|
||||
|
||||
using mindspore::schema::PrimitiveType_Scale;
|
||||
|
||||
namespace mindspore::lite::micro {
|
||||
// Releases the scale/offset snapshots allocated in InitScaleOffset().
ScaleFP32Coder::~ScaleFP32Coder() {
  // Fix: scale_param_ is only assigned in Prepare(); destroying a coder before
  // Prepare() ran dereferenced a null pointer here.
  if (scale_param_ == nullptr) {
    return;
  }
  if (scale_param_->const_scale_) {
    if (scale_) {
      free(scale_);
      scale_ = nullptr;
    }
  }
  if (scale_param_->const_offset_) {
    if (offset_) {
      free(offset_);
      offset_ = nullptr;
    }
  }
}
|
||||
|
||||
// Snapshots constant scale/offset tensor data into heap buffers owned by this
// coder (freed in the destructor). Non-constant inputs leave the pointers null.
int ScaleFP32Coder::InitScaleOffset() {
  Tensor *scale_tensor = input_tensors_.at(kWeightIndex);
  MS_CHECK_PTR(scale_tensor);
  if (reinterpret_cast<float *>(scale_tensor->data_c())) {
    // Scale data is known at codegen time: copy it out.
    scale_param_->const_scale_ = true;
    scale_ = reinterpret_cast<float *>(malloc(scale_tensor->ElementsNum() * sizeof(float)));
    MS_CHECK_PTR(scale_);
    MS_CHECK_TRUE(scale_tensor->Size() > 0, "invalid scale tensor size");
    MS_CHECK_RET_CODE(memcpy_s(scale_, scale_tensor->Size(), scale_tensor->data_c(), scale_tensor->Size()),
                      "memcpy scale failed");
  } else {
    scale_param_->const_scale_ = false;
    scale_ = nullptr;
  }

  if (input_tensors_.size() == 2) {
    // No offset input given: synthesize an all-zero offset sized like scale.
    scale_param_->const_offset_ = true;
    offset_ = reinterpret_cast<float *>(malloc(scale_tensor->ElementsNum() * sizeof(float)));
    MS_CHECK_PTR(offset_);
    MS_CHECK_RET_CODE(memset_s(offset_, scale_tensor->Size(), 0, scale_tensor->Size()), "memset_s failed!");
  } else if (input_tensors_.size() == 3 && reinterpret_cast<float *>(input_tensors_.at(2)->data_c())) {
    // Constant offset tensor provided: copy it out.
    scale_param_->const_offset_ = true;
    Tensor *offset_tensor = input_tensors_.at(2);
    offset_ = reinterpret_cast<float *>(malloc(offset_tensor->ElementsNum() * sizeof(float)));
    MS_CHECK_PTR(offset_);
    MS_CHECK_TRUE(offset_tensor->Size() > 0, "invalid offset tensor size");
    MS_CHECK_RET_CODE(memcpy_s(offset_, offset_tensor->Size(), offset_tensor->data_c(), offset_tensor->Size()),
                      "memcpy_s failed!");
  } else {
    // Offset input exists but is not constant.
    scale_param_->const_offset_ = false;
    offset_ = nullptr;
  }
  return RET_OK;
}
|
||||
|
||||
// Splits the input shape around the scale axis into outer/axis/inner sizes
// and validates that the scale tensor shape matches the covered dimensions.
int ScaleFP32Coder::CalculateParameter() {
  std::vector<int> in_shape = input_tensor_->shape();
  Tensor *scale_tensor = input_tensors_.at(kWeightIndex);
  MS_CHECK_PTR(scale_tensor);
  std::vector<int> scale_shape = scale_tensor->shape();

  // Negative axis counts from the end, as in the usual framework convention.
  if (scale_param_->axis_ < 0) {
    scale_param_->axis_ = scale_param_->axis_ + in_shape.size();
  }
  // NOTE(review): these comparisons mix int (axis_) with size_t sizes —
  // confirm axis_ is guaranteed non-negative after the adjustment above.
  if (scale_shape.size() + scale_param_->axis_ > in_shape.size()) {
    MS_LOG(ERROR) << "Scale tensor shape is incorrect.";
    return RET_ERROR;
  }
  scale_param_->outer_size_ = 1;
  scale_param_->axis_size_ = 1;
  scale_param_->inner_size_ = 1;
  // Product of dims before the axis.
  for (int i = 0; i < scale_param_->axis_; i++) {
    scale_param_->outer_size_ *= in_shape.at(i);
  }
  // Dims covered by the scale tensor must match the input exactly.
  for (size_t i = 0; i < scale_shape.size(); i++) {
    if (in_shape.at(i + scale_param_->axis_) != scale_shape.at(i)) {
      MS_LOG(ERROR) << "Scale tensor shape is incorrect.";
      return RET_ERROR;
    }
    scale_param_->axis_size_ *= in_shape.at(i + scale_param_->axis_);
  }
  // Product of the remaining trailing dims.
  for (size_t i = scale_param_->axis_ + scale_shape.size(); i < in_shape.size(); i++) {
    scale_param_->inner_size_ *= in_shape.at(i);
  }
  // Never run more threads than there are outer slices to process.
  scale_param_->op_parameter_.thread_num_ = MSMIN(scale_param_->op_parameter_.thread_num_, scale_param_->outer_size_);
  return RET_OK;
}
|
||||
|
||||
int ScaleFP32Coder::Prepare(CoderContext *const context) {
|
||||
this->scale_param_ = reinterpret_cast<ScaleParameter *>(parameter_);
|
||||
if (input_tensors_.size() < 2 || input_tensors_.size() > 3) {
|
||||
MS_LOG(ERROR) << "inputs to Scale operator should be 2 or 3, but " << input_tensors_.size() << " is given.";
|
||||
return RET_ERROR;
|
||||
}
|
||||
MS_CHECK_RET_CODE(InitScaleOffset(), "Scale fp32 InitScaleOffset failed.");
|
||||
return ReSize();
|
||||
}
|
||||
|
||||
// Recomputes the axis split parameters; called from Prepare().
int ScaleFP32Coder::ReSize() {
  MS_CHECK_RET_CODE(CalculateParameter(), "Scale fp32 CalculateParameter failed.");
  return RET_OK;
}
|
||||
|
||||
int ScaleFP32Coder::DoCode(CoderContext *const context) {
|
||||
// init struct ScaleParameters
|
||||
Tensor *scale_tensor = input_tensors_.at(kWeightIndex);
|
||||
Tensor *offset_tensor = input_tensors_.at(kBiasIndex);
|
||||
MS_CHECK_PTR(scale_tensor);
|
||||
MS_CHECK_PTR(offset_tensor);
|
||||
Collect(context, {"nnacl/scale.h", "nnacl/fp32/scale.h", "nnacl/quantization/quantize.h"}, {"scale.c"});
|
||||
|
||||
nnacl::NNaclFp32Serializer code;
|
||||
code.CodeStruct("scale_parameter", *scale_param_);
|
||||
|
||||
if (thread_num_ > 1) {
|
||||
code.CodeBaseStruct("ScaleFp32Args", "args", input_tensor_, output_tensor_, scale_tensor, offset_tensor,
|
||||
"&scale_parameter");
|
||||
CODE_PARALLEL_FUNC("ScaleFp32Run");
|
||||
} else {
|
||||
int task_id = 0;
|
||||
switch (scale_param_->activation_type_) {
|
||||
case schema::ActivationType_RELU6:
|
||||
code.CodeFunction("DoScaleRelu6", input_tensor_, output_tensor_, scale_tensor, offset_tensor, task_id,
|
||||
"&scale_parameter");
|
||||
break;
|
||||
case schema::ActivationType_RELU:
|
||||
code.CodeFunction("DoScaleRelu", input_tensor_, output_tensor_, scale_tensor, offset_tensor, task_id,
|
||||
"&scale_parameter");
|
||||
break;
|
||||
case schema::ActivationType_NO_ACTIVATION:
|
||||
code.CodeFunction("DoScale", input_tensor_, output_tensor_, scale_tensor, offset_tensor, task_id,
|
||||
"&scale_parameter");
|
||||
break;
|
||||
default:
|
||||
MS_LOG(ERROR) << "Scale does not support activation type " << scale_param_->activation_type_;
|
||||
return RET_ERROR;
|
||||
}
|
||||
}
|
||||
MS_LOG(INFO) << "ScaleFP32Code has been called";
|
||||
context->AppendCode(code.str());
|
||||
return RET_OK;
|
||||
}
|
||||
|
||||
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Scale, CPUOpCoderCreator<ScaleFP32Coder>)
|
||||
} // namespace mindspore::lite::micro
|
|
@ -0,0 +1,49 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MICRO_CODER_OPCODERS_FP32_SCALEFP32_CODER_H_
|
||||
#define MICRO_CODER_OPCODERS_FP32_SCALEFP32_CODER_H_
|
||||
|
||||
#include <vector>
|
||||
#include "micro/coder/opcoders/op_coder.h"
|
||||
#include "nnacl/scale.h"
|
||||
|
||||
namespace mindspore::lite::micro {
|
||||
|
||||
// Fp32 code generator for the Scale operator (out = in * scale + offset).
class ScaleFP32Coder final : public OperatorCoder {
 public:
  ScaleFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
                 const Model::Node *node, size_t node_index, Target target)
      : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
  // NOTE(review): declared but no definition is visible in the matching .cc
  // chunk — confirm it is defined somewhere, otherwise `= default` is needed.
  ~ScaleFP32Coder() override;
  int Prepare(CoderContext *const context) override;

  int DoCode(CoderContext *const context) override;

 private:
  int ReSize();              // recompute derived sizes after a shape change
  int CalculateParameter();  // fill outer/axis/inner sizes in scale_param_
  int InitScaleOffset();     // set up the scale/offset weight data

 private:
  ScaleParameter *scale_param_{nullptr};  // op attributes; bound in Prepare()
  float *scale_{nullptr};                 // scale weight data
  float *offset_{nullptr};                // offset weight data
};
|
||||
|
||||
} // namespace mindspore::lite::micro
|
||||
|
||||
#endif  // MICRO_CODER_OPCODERS_FP32_SCALEFP32_CODER_H_
|
|
@ -0,0 +1,74 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "micro/coder/opcoders/nnacl/fp32/slice_fp32_coder.h"
|
||||
#include <string>
|
||||
#include "nnacl/slice_parameter.h"
|
||||
#include "src/ops/slice.h"
|
||||
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
|
||||
#include "micro/coder/opcoders/file_collector.h"
|
||||
|
||||
using mindspore::schema::PrimitiveType_Slice;
|
||||
namespace mindspore::lite::micro {
|
||||
// No preprocessing is required; all work happens during code generation.
int SliceFP32Coder::Prepare(CoderContext *const context) {
  return RET_OK;
}
|
||||
|
||||
// Emits the generated code for fp32 Slice: fills the SliceParameter from the
// primitive's begin/size attributes, pads to 4D if needed, and calls the
// serial slice kernel.
int SliceFP32Coder::DoCode(CoderContext *const context) {
  // generate code .h .c
  Collect(context, {"nnacl/slice_parameter.h", "nnacl/fp32/slice.h"}, {"slice.c"});

  auto param = reinterpret_cast<SliceParameter *>(parameter_);
  auto primitive_slice = reinterpret_cast<const mindspore::lite::Slice *>(OperatorCoder::primitive());
  std::vector<int> begin = primitive_slice->GetPostProcessBegin();
  std::vector<int> size = primitive_slice->GetPostProcessSize();
  std::vector<int> input_shape = input_tensor_->shape();
  nnacl::NNaclFp32Serializer code;
  // Fill shape/begin/end/size per axis in a single pass (the original four
  // loops were independent per index). A negative size means "to the end of
  // the axis" and is resolved against the input shape.
  for (int i = 0; i < param->param_length_; i++) {
    param->shape_[i] = input_shape.at(i);
    param->begin_[i] = begin.at(i);
    int real_size = size.at(i) < 0 ? input_shape.at(i) - begin.at(i) : size.at(i);
    param->end_[i] = begin.at(i) + real_size;
    param->size_[i] = real_size;
  }

  code.CodeStruct("slice_parameter", *param);

  // call the op function: pad to 4D before the kernel when fewer dims given.
  if (param->param_length_ < DIMENSION_4D) {
    code.CodeFunction("PadSliceParameterTo4D", "&slice_parameter");
  }
  code.CodeFunction("DoSliceNoParallel", input_tensor_, output_tensor_, "&slice_parameter");
  context->AppendCode(code.str());
  return RET_OK;
}
|
||||
|
||||
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Slice, CPUOpCoderCreator<SliceFP32Coder>)
|
||||
} // namespace mindspore::lite::micro
|
|
@ -0,0 +1,37 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_SLICE_FP32_CODER_H_
|
||||
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_SLICE_FP32_CODER_H_
|
||||
|
||||
#include <vector>
|
||||
#include "micro/coder/opcoders/op_coder.h"
|
||||
|
||||
namespace mindspore::lite::micro {
|
||||
// Fp32 code generator for the Slice operator.
class SliceFP32Coder : public OperatorCoder {
 public:
  SliceFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
                 const Model::Node *node, size_t node_index, Target target)
      : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}

  ~SliceFP32Coder() override = default;

  // No-op: all slice bookkeeping happens in DoCode.
  int Prepare(CoderContext *const context) override;

  int DoCode(CoderContext *const context) override;
};
|
||||
} // namespace mindspore::lite::micro
|
||||
#endif  // MINDSPORE_LITE_MICRO_CODER_OPCODERS_SLICE_FP32_CODER_H_
|
|
@ -0,0 +1,45 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "micro/coder/opcoders/nnacl/fp32/squeeze_dims_fp32_coder.h"
|
||||
#include <string>
|
||||
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
|
||||
#include "micro/coder/opcoders/file_collector.h"
|
||||
|
||||
using mindspore::schema::PrimitiveType_Squeeze;
|
||||
|
||||
namespace mindspore::lite::micro {
|
||||
|
||||
// Emits the generated code for Squeeze: a flat element copy, since squeezing
// only removes size-1 dimensions and never rearranges data.
int SqueezeFP32Coder::DoCode(CoderContext *const context) {
  size_t data_size = input_tensor_->Size();
  // generate code .h .c
  Collect(context, {"nnacl/squeeze.h"}, {"nnacl/squeeze.c"});

  nnacl::NNaclFp32Serializer code;

  // call the op function
  // NOTE(review): this coder is registered for kNumberTypeFloat32, yet an
  // int32 branch exists — presumably int tensors can be routed here; confirm.
  if (input_tensor_->data_type() == kNumberTypeInt32) {
    code.CodeFunction("DoSqueezeInt32", input_tensor_, output_tensor_, data_size);
  } else {
    code.CodeFunction("DoSqueeze", input_tensor_, output_tensor_, data_size);
  }
  context->AppendCode(code.str());
  return RET_OK;
}
|
||||
|
||||
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Squeeze, CPUOpCoderCreator<SqueezeFP32Coder>)
|
||||
|
||||
} // namespace mindspore::lite::micro
|
|
@ -0,0 +1,37 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_SQUEEZE_FP32_CODER_H_
|
||||
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_SQUEEZE_FP32_CODER_H_
|
||||
|
||||
#include <vector>
|
||||
#include "micro/coder/opcoders/op_coder.h"
|
||||
|
||||
namespace mindspore::lite::micro {
|
||||
// Fp32 code generator for the Squeeze operator (pure data copy).
class SqueezeFP32Coder : public OperatorCoder {
 public:
  SqueezeFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
                   const Model::Node *node, size_t node_index, Target target)
      : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}

  ~SqueezeFP32Coder() override = default;

  // Nothing to precompute; DoCode does all the work.
  int Prepare(CoderContext *const context) override { return RET_OK; }

  int DoCode(CoderContext *const context) override;
};
|
||||
} // namespace mindspore::lite::micro
|
||||
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_SQUEEZE_FP32_CODER_H_
|
|
@ -0,0 +1,68 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "micro/coder/opcoders/nnacl/fp32/tile_fp32_coder.h"
|
||||
#include <string>
|
||||
#include <type_traits>
|
||||
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
|
||||
#include "micro/coder/opcoders/file_collector.h"
|
||||
|
||||
using mindspore::schema::PrimitiveType_Tile;
|
||||
|
||||
namespace mindspore::lite::micro {
|
||||
// Fills `strides` with row-major strides for `shape`: the innermost axis has
// stride 1 and each outer axis multiplies in the extent of the axis inside it.
void TileFP32Coder::ComputeStrides(const int *shape, int *strides, int ndim) const {
  int accumulated = 1;
  for (int axis = ndim; axis-- > 0;) {
    strides[axis] = accumulated;
    accumulated *= shape[axis];
  }
}
|
||||
|
||||
int TileFP32Coder::Resize() {
|
||||
tile_param_ = reinterpret_cast<TileParameter *>(parameter_);
|
||||
MS_CHECK_TRUE(tile_param_->in_dim_ < static_cast<int>(std::extent<decltype(tile_param_->in_dim_)>::value),
|
||||
"invalid dims count");
|
||||
MS_CHECK_TRUE(static_cast<int>(input_tensor_->shape().size()) < tile_param_->in_dim_, "invalid input shape number.");
|
||||
MS_CHECK_TRUE(static_cast<int>(output_tensor_->shape().size()) < tile_param_->in_dim_,
|
||||
"invalid output shape number.");
|
||||
for (int i = 0; i < tile_param_->in_dim_; ++i) {
|
||||
tile_param_->in_shape_[i] = input_tensor_->shape().at(i);
|
||||
tile_param_->out_shape_[i] = output_tensor_->shape().at(i);
|
||||
}
|
||||
ComputeStrides(tile_param_->in_shape_, tile_param_->in_strides_, tile_param_->in_dim_);
|
||||
ComputeStrides(tile_param_->out_shape_, tile_param_->out_strides_, tile_param_->in_dim_);
|
||||
return RET_OK;
|
||||
}
|
||||
|
||||
// All the preparation work happens in the (re)size computation.
int TileFP32Coder::Prepare(CoderContext *const context) {
  return Resize();
}
|
||||
|
||||
// Emits the generated code for Tile: serializes the prepared TileParameter
// and calls the single-shot Tile kernel.
int TileFP32Coder::DoCode(CoderContext *const context) {
  // generate code .h .c
  Collect(context, {"nnacl/fp32/tile.h"}, {"nnacl/fp32/tile.c"});

  nnacl::NNaclFp32Serializer code;

  code.CodeStruct("tile_parameter", *tile_param_);
  // call the op function
  code.CodeFunction("Tile", input_tensor_, output_tensor_, "&tile_parameter");

  context->AppendCode(code.str());
  return RET_OK;
}
|
||||
|
||||
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Tile, CPUOpCoderCreator<TileFP32Coder>)
|
||||
|
||||
} // namespace mindspore::lite::micro
|
|
@ -0,0 +1,43 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_TILE_FP32_CODER_H_
|
||||
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_TILE_FP32_CODER_H_
|
||||
|
||||
#include <vector>
|
||||
#include "micro/coder/opcoders/op_coder.h"
|
||||
#include "nnacl/base/tile_base.h"
|
||||
|
||||
namespace mindspore::lite::micro {
|
||||
// Fp32 code generator for the Tile operator.
class TileFP32Coder : public OperatorCoder {
 public:
  TileFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
                const Model::Node *node, size_t node_index, Target target)
      : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
  ~TileFP32Coder() override = default;

  int Prepare(CoderContext *const context) override;

  int DoCode(CoderContext *const context) override;

 private:
  // Fill `strides` with row-major strides for `shape` (ndim entries).
  void ComputeStrides(const int *shape, int *strides, int ndim) const;
  // Copy shapes into tile_param_ and precompute in/out strides.
  int Resize();

  TileParameter *tile_param_{nullptr};  // op attributes; bound in Resize()
};
|
||||
} // namespace mindspore::lite::micro
|
||||
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_TILE_FP32_CODER_H_
|
|
@ -0,0 +1,94 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "micro/coder/opcoders/nnacl/fp32/transpose_fp32_coder.h"
|
||||
#include <vector>
|
||||
#include <string>
|
||||
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
|
||||
#include "micro/coder/opcoders/file_collector.h"
|
||||
|
||||
using mindspore::schema::PrimitiveType_Transpose;
|
||||
namespace mindspore::lite::micro {
|
||||
|
||||
int TransposeFp32Coder::Resize() {
|
||||
num_unit_ = static_cast<int>(input_tensor_->shape().at(transpose_parameter_->perm_[kNHWC_H]));
|
||||
thread_h_num_ = MSMIN(thread_num_, num_unit_);
|
||||
MS_CHECK_TRUE(thread_h_num_ > 0, "thread_h_num_ <= 0");
|
||||
thread_h_stride_ = UP_DIV(num_unit_, thread_h_num_);
|
||||
|
||||
std::vector<int> in_shape = input_tensor_->shape();
|
||||
std::vector<int> out_shape = output_tensor_->shape();
|
||||
transpose_parameter_->strides_[transpose_parameter_->num_axes_ - 1] = 1;
|
||||
transpose_parameter_->out_strides_[transpose_parameter_->num_axes_ - 1] = 1;
|
||||
transpose_parameter_->data_size_ = static_cast<int>(input_tensor_->Size());
|
||||
for (int i = transpose_parameter_->num_axes_ - 2; i >= 0; i--) {
|
||||
transpose_parameter_->strides_[i] = in_shape.at(i + 1) * transpose_parameter_->strides_[i + 1];
|
||||
transpose_parameter_->out_strides_[i] = out_shape.at(i + 1) * transpose_parameter_->out_strides_[i + 1];
|
||||
}
|
||||
MS_CHECK_TRUE(in_shape.size() > 0, "invalid shape size");
|
||||
MS_CHECK_TRUE(out_shape.size() > 0, "invalid shape size");
|
||||
auto in_shape_data_size = static_cast<size_t>(in_shape.size() * sizeof(int));
|
||||
auto out_shape_data_size = static_cast<size_t>(out_shape.size() * sizeof(int));
|
||||
in_shape_ = reinterpret_cast<int *>(allocator_->Malloc(kNumberTypeInt, in_shape_data_size, kOfflinePackWeight));
|
||||
MS_CHECK_PTR(in_shape_);
|
||||
out_shape_ =
|
||||
reinterpret_cast<int *>(allocator_->Malloc(kNumberTypeInt, out_shape.size() * sizeof(int), kOfflinePackWeight));
|
||||
MS_CHECK_PTR(out_shape_);
|
||||
MS_CHECK_RET_CODE(memcpy_s(in_shape_, in_shape_data_size, in_shape.data(), in_shape_data_size), "memcpy failed");
|
||||
MS_CHECK_RET_CODE(memcpy_s(out_shape_, out_shape_data_size, out_shape.data(), out_shape_data_size), "memcpy failed");
|
||||
return RET_OK;
|
||||
}
|
||||
|
||||
int TransposeFp32Coder::Init() {
|
||||
transpose_parameter_ = reinterpret_cast<TransposeParameter *>(parameter_);
|
||||
MS_CHECK_PTR(transpose_parameter_);
|
||||
return Resize();
|
||||
}
|
||||
|
||||
// Initializes parameters and allocates the workspace buffers the generated
// transpose needs when the output rank exceeds the kernel's fixed-dim limit.
int TransposeFp32Coder::Prepare(CoderContext *const context) {
  MS_CHECK_RET_CODE(Init(), "init failed");
  int out_dims = static_cast<int>(output_tensor_->shape().size());
  auto out_data_dims_size = static_cast<size_t>(out_dims * thread_h_num_ * sizeof(int));
  if (out_dims > MAX_TRANSPOSE_DIM_SIZE) {
    // Per-task dim-size and position workspaces; they stay null for small
    // ranks — presumably the kernel ignores them then (see DoCode); confirm.
    dim_size_ = reinterpret_cast<int *>(allocator_->Malloc(kNumberTypeInt, out_data_dims_size, kWorkspace));
    MS_CHECK_PTR(dim_size_);
    position_ = reinterpret_cast<int *>(allocator_->Malloc(kNumberTypeInt, out_data_dims_size, kWorkspace));
    MS_CHECK_PTR(position_);
  }
  return RET_OK;
}
|
||||
|
||||
// Emits the generated code for a single-task fp32 transpose.
int TransposeFp32Coder::DoCode(CoderContext *const context) {
  // Only task 0 is generated; the split mirrors the runtime kernel's scheme.
  int task_id = 0;
  int num_unit_thread = MSMIN(thread_h_stride_, num_unit_ - task_id * thread_h_stride_);
  if (num_unit_thread <= 0) {
    return RET_OK;
  }

  Collect(context, {"nnacl/transpose.h", "nnacl/fp32/transpose.h", "nnacl/errorcode.h"}, {"transpose.c"});

  nnacl::NNaclFp32Serializer code;
  code.CodeStruct("transpose_parameter", *transpose_parameter_);

  // dim_size_/position_ may be null when the output rank is within
  // MAX_TRANSPOSE_DIM_SIZE (see Prepare) — presumably the kernel tolerates
  // that for small ranks; confirm against DoTransposeFp32.
  code.CodeFunction("DoTransposeFp32", input_tensor_, output_tensor_, in_shape_, out_shape_, "&transpose_parameter",
                    task_id, num_unit_thread, dim_size_, position_);

  context->AppendCode(code.str());
  return RET_OK;
}
|
||||
|
||||
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Transpose, CPUOpCoderCreator<TransposeFp32Coder>)
|
||||
} // namespace mindspore::lite::micro
|
|
@ -0,0 +1,53 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MICRO_LITE_MICRO_CODER_OPCODERS_NNACL_FP32_TRANSPOSE_FP32_CODER_H_
|
||||
#define MICRO_LITE_MICRO_CODER_OPCODERS_NNACL_FP32_TRANSPOSE_FP32_CODER_H_
|
||||
#include <vector>
|
||||
#include "micro/coder/opcoders/op_coder.h"
|
||||
#include "nnacl/transpose.h"
|
||||
namespace mindspore::lite::micro {
|
||||
|
||||
// Fp32 code generator for the Transpose operator.
class TransposeFp32Coder final : public OperatorCoder {
 public:
  TransposeFp32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
                     const Model::Node *node, size_t node_index, Target target)
      : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}

  ~TransposeFp32Coder() override = default;

  int Prepare(CoderContext *const context) override;

  int DoCode(CoderContext *const context) override;

  // Recompute strides, thread split, and the packed shape buffers.
  int Resize();

  // Bind the TransposeParameter and run Resize().
  int Init();

 private:
  TransposeParameter *transpose_parameter_ = nullptr;  // op attributes
  // NOTE(review): this likely shadows a thread_num_ in the base coder and
  // pins generation to a single thread — confirm the intent.
  int thread_num_ = 1;
  int thread_h_stride_ = 0;  // units of work per task along the H axis
  int thread_h_num_ = 0;     // number of tasks actually used
  int num_unit_ = 0;         // total units along the permuted H axis
  int *in_shape_ = nullptr;   // packed input shape (offline buffer)
  int *out_shape_ = nullptr;  // packed output shape (offline buffer)
  int *dim_size_ = nullptr;   // workspace for ranks > MAX_TRANSPOSE_DIM_SIZE
  int *position_ = nullptr;   // workspace for ranks > MAX_TRANSPOSE_DIM_SIZE
};
|
||||
|
||||
} // namespace mindspore::lite::micro
|
||||
#endif // MICRO_LITE_MICRO_CODER_OPCODERS_NNACL_FP32_TRANSPOSE_FP32_CODER_H_
|
|
@ -0,0 +1,114 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "micro/coder/opcoders/nnacl/int8/concat_int8_coder.h"
|
||||
#include <limits>
|
||||
#include "nnacl/int8/concat_int8.h"
|
||||
#include "nnacl/int8/arithmetic_int8.h"
|
||||
#include "nnacl/int8/quantize.h"
|
||||
#include "micro/coder/opcoders/file_collector.h"
|
||||
#include "micro/coder/log.h"
|
||||
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.h"
|
||||
|
||||
// File-local helper: allocates the per-input quant-arg array for Concat.
// Marked static for internal linkage — it is defined at global scope in a
// .cc file and must not clash with other translation units.
static int MallocQuantArgForConcat(ConcatQuantArg *quant_arg, size_t input_num) {
  quant_arg->in_args_ = static_cast<QuantArg *>(malloc(sizeof(QuantArg) * input_num));
  MS_CHECK_PTR(quant_arg->in_args_);
  return mindspore::lite::RET_OK;
}
|
||||
|
||||
using mindspore::schema::PrimitiveType_Concat;
|
||||
|
||||
namespace mindspore::lite::micro {
|
||||
// Gathers quantization parameters and shape bookkeeping for int8 Concat,
// combining what the runtime kernel does in its base and int8 resize steps.
int ConcatInt8Coder::Prepare(CoderContext *const context) {
  this->concat_param_ = reinterpret_cast<ConcatParameter *>(parameter_);

  concat_param_->input_shapes_ = nullptr;
  size_t input_num = input_tensors().size();
  // NOTE(review): input_data_ is never assigned anywhere visible in this file
  // before this check — it looks like an allocation is missing; confirm.
  MS_CHECK_PTR(input_data_);
  MS_CHECK_RET_CODE(MallocQuantArgForConcat(&concat_param_->quant_arg_, input_num),
                    "Null pointer reference: quant_concat_parm_->in_quant_args_.");
  // Per-input quantization parameters (scale/zero-point of quant arg 0).
  for (int i = 0; i < static_cast<int>(input_num); i++) {
    auto *input_tensor = input_tensors().at(i);
    auto quant_args = input_tensor->quant_params();
    concat_param_->quant_arg_.in_args_[i].scale_ = quant_args.at(0).scale;
    concat_param_->quant_arg_.in_args_[i].zp_ = quant_args.at(0).zeroPoint;
  }

  auto quant_args = output_tensor_->quant_params();
  concat_param_->quant_arg_.out_args_.scale_ = quant_args.at(0).scale;
  concat_param_->quant_arg_.out_args_.zp_ = quant_args.at(0).zeroPoint;

  // Clamp output to the full int8 range.
  concat_param_->quant_arg_.output_activation_min_ = std::numeric_limits<int8_t>::min();
  concat_param_->quant_arg_.output_activation_max_ = std::numeric_limits<int8_t>::max();
  // concat base resize: normalize a negative axis against the input rank.
  axis_ = concat_param_->axis_ >= 0 ? concat_param_->axis_ : input_tensor_->shape().size() + concat_param_->axis_;
  // concat int8 resize
  concat_param_->input_num_ = input_num;
  concat_param_->input_shapes_ = reinterpret_cast<int **>(malloc(sizeof(int *) * input_num));
  MS_CHECK_PTR(concat_param_->input_shapes_);
  for (int i = 0; i < static_cast<int>(input_num); i++) {
    // NOTE(review): if Tensor::shape() returns by value, this stores a pointer
    // into a destroyed temporary — verify shape() returns a stable reference.
    concat_param_->input_shapes_[i] = reinterpret_cast<int *>(input_tensors().at(i)->shape().data());
  }

  // Product of output dims before the concat axis (outer iteration count).
  before_axis_size = 1;
  for (int i = 0; i < axis_ && i < static_cast<int>(output_tensor_->shape().size()); i++) {
    before_axis_size *= output_tensor_->DimensionSize(i);
  }

  // Product of output dims after the concat axis (contiguous copy length).
  int64_t after_axis_size = 1;
  int output_dim = static_cast<int>(output_tensor_->shape().size());
  concat_param_->output_shapes_ = output_tensor_->shape().data();
  for (int i = axis_ + 1; i < output_dim; i++) {
    after_axis_size *= concat_param_->output_shapes_[i];
  }
  concat_param_->after_axis_size = after_axis_size;
  return RET_OK;
}
|
||||
|
||||
// Emits the generated code for int8 Concat: gathers the input pointers into
// an array, serializes the parameter struct, then dispatches either the
// parallel runner or the single-task kernel.
int ConcatInt8Coder::DoCode(CoderContext *const context) {
  concat_param_->thread_count_ = thread_num_;
  MS_CHECK_TRUE(thread_num_ > 0, "thread_num_ <= 0");
  // Work per thread along the before-axis dimension.
  count_unit_ = thread_num_ > 1 ? UP_DIV(before_axis_size, thread_num_) : before_axis_size;
  concat_param_->count_unit_ = count_unit_;

  Collect(context, {"nnacl/int8/concat_int8.h"}, {"concat_int8.c"});
  nnacl::NNaclInt8Serializer code;

  int in_tensor_count = input_tensors().size();
  // Emit an array holding the runtime address of every input tensor.
  code << "int8_t *input_data[" << in_tensor_count << "];\n";
  // input data
  for (int i = 0; i < static_cast<int>(input_tensors().size()); ++i) {
    MS_CHECK_PTR(input_tensors().at(i));
    code << "input_data[" << i << "] = " << allocator_->GetRuntimeAddr(input_tensors().at(i)) << ";\n";
  }
  code.CodeStruct("concat_param", *concat_param_, in_tensor_count, input_tensor_->shape().size(),
                  output_tensor_->shape().size());

  if (thread_num_ > 1) {
    // Parallel path: pack the args struct and launch through the thread pool.
    code.CodeBaseStruct("ConcatInt8Args", "args", "input_data", output_tensor_, "&concat_param", axis_,
                        before_axis_size, count_unit_);
    code.CodeFunction("ParallelLaunch", "THREAD_POOL_DEFAULT", "ConcatInt8Run", "&args", "thread_num");
  } else {
    // Single-task path: call the kernel directly as task 0.
    int task_id = 0;
    int64_t real_dst_count = MSMIN(before_axis_size - task_id * count_unit_, count_unit_);
    code.CodeFunction("Int8Concat", "input_data", output_tensor_, "&concat_param", axis_, real_dst_count, task_id);
  }
  context->AppendCode(code.str());
  return RET_OK;
}
|
||||
|
||||
REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt8, PrimitiveType_Concat, CPUOpCoderCreator<ConcatInt8Coder>)
|
||||
} // namespace mindspore::lite::micro
|
|
@ -0,0 +1,56 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_CONCAT_INT8_CODER_H_
|
||||
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_CONCAT_INT8_CODER_H_
|
||||
|
||||
#include <cstring>
|
||||
#include <vector>
|
||||
#include "micro/coder/opcoders/op_coder.h"
|
||||
#include "nnacl/int8/concat_int8.h"
|
||||
|
||||
namespace mindspore::lite::micro {
|
||||
class ConcatInt8Coder : public OperatorCoder {
|
||||
public:
|
||||
ConcatInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
|
||||
const Model::Node *node, size_t node_index, Target target)
|
||||
: OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
|
||||
|
||||
~ConcatInt8Coder() {
|
||||
if (concat_param_ == nullptr) {
|
||||
return;
|
||||
}
|
||||
if (concat_param_->quant_arg_.in_args_ != nullptr) {
|
||||
free(concat_param_->quant_arg_.in_args_);
|
||||
}
|
||||
if (concat_param_->input_shapes_ != nullptr) {
|
||||
free(concat_param_->input_shapes_);
|
||||
}
|
||||
}
|
||||
|
||||
int Prepare(CoderContext *const context) override;
|
||||
|
||||
int DoCode(CoderContext *const context) override;
|
||||
|
||||
private:
|
||||
ConcatParameter *concat_param_{nullptr};
|
||||
int64_t before_axis_size{0};
|
||||
int64_t count_unit_{0};
|
||||
int8_t *input_data_{nullptr};
|
||||
int axis_ = 0;
|
||||
};
|
||||
} // namespace mindspore::lite::micro
|
||||
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_CONCAT_INT8_CODER_H_
|
|
@ -0,0 +1,82 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
#include "micro/coder/opcoders/nnacl/int8/pooling_int8_coder.h"
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include "nnacl/int8/pooling_int8.h"
|
||||
#include "micro/coder/log.h"
|
||||
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.h"
|
||||
#include "micro/coder/opcoders/file_collector.h"
|
||||
|
||||
using std::string;
|
||||
|
||||
using mindspore::schema::PrimitiveType_Pooling;
|
||||
namespace mindspore::lite::micro {
|
||||
|
||||
// Emits C source performing int8 pooling (max or average). Populates the
// PoolingParameter from the input/output tensor geometry, serializes it
// (including per-tensor quant args), then codes either a ParallelLaunch
// or a direct single-threaded call.
int PoolingInt8Coder::DoCode(CoderContext *const context) {
  // attribute
  auto *pooling_parameter = reinterpret_cast<PoolingParameter *>(parameter_);
  MS_CHECK_PTR(pooling_parameter);
  // init struct PoolingParameters from the runtime tensor shapes
  Tensor *in_tensor = input_tensors_.at(kInputIndex);
  Tensor *out_tensor = output_tensors_.at(kOutputIndex);
  MS_CHECK_PTR(in_tensor);
  MS_CHECK_PTR(out_tensor);
  pooling_parameter->input_batch_ = in_tensor->Batch();
  pooling_parameter->input_channel_ = in_tensor->Channel();
  pooling_parameter->input_h_ = in_tensor->Height();
  pooling_parameter->input_w_ = in_tensor->Width();
  pooling_parameter->output_batch_ = out_tensor->Batch();
  pooling_parameter->output_channel_ = out_tensor->Channel();
  pooling_parameter->output_h_ = out_tensor->Height();
  pooling_parameter->output_w_ = out_tensor->Width();

  // get quant params (only the first quant arg of each tensor is used)
  std::vector<QuantArg> in_quant_args = in_tensor->quant_params();
  std::vector<QuantArg> out_quant_args = out_tensor->quant_params();
  Collect(context, {"nnacl/int8/pooling_int8.h", "nnacl/errorcode.h"}, {"pooling_int8.c"});
  nnacl::NNaclInt8Serializer code;
  code.precision(kPrecision);
  // code op parameter
  ::QuantArg quant_arg_in = {static_cast<float>(in_quant_args.at(0).scale), in_quant_args.at(0).zeroPoint};
  ::QuantArg quant_arg_out = {static_cast<float>(out_quant_args.at(0).scale), out_quant_args.at(0).zeroPoint};
  ::QuantArg *quant_args[2] = {&quant_arg_in, &quant_arg_out};
  // NOTE(review): quant_args points at function-local objects, so
  // pooling_parameter holds a dangling pointer once DoCode returns. This
  // is only safe because CodeStruct below consumes it immediately —
  // confirm nothing reads quant_args_ afterwards.
  pooling_parameter->quant_args_ = quant_args;
  code.CodeStruct("pooling_parameter", *pooling_parameter);

  if (thread_num_ > 1) {
    code.CodeBaseStruct("PoolingInt8Args", "args", in_tensor, out_tensor, "(PoolingParameter *)&pooling_parameter");
    if (pooling_parameter->pool_mode_ == PoolMode_MaxPool) {
      code.CodeFunction("ParallelLaunch", "THREAD_POOL_DEFAULT", "MaxPoolingInt8Run", "&args", "thread_num");
    } else {
      code.CodeFunction("ParallelLaunch", "THREAD_POOL_DEFAULT", "AvgPoolingInt8Run", "&args", "thread_num");
    }
  } else {
    // single-threaded path: call the kernel directly with task 0
    int task_id = 0;
    if (pooling_parameter->pool_mode_ == PoolMode_MaxPool) {
      code.CodeFunction("MaxPoolingInt8", in_tensor, out_tensor, "(PoolingParameter *)&pooling_parameter", task_id);
    } else {
      code.CodeFunction("AvgPoolingInt8", in_tensor, out_tensor, "(PoolingParameter *)&pooling_parameter", task_id);
    }
  }
  MS_LOG(INFO) << "PoolingInt8Code has been called";
  context->AppendCode(code.str());
  return lite::RET_OK;
}

REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt8, PrimitiveType_Pooling, CPUOpCoderCreator<PoolingInt8Coder>)
|
||||
} // namespace mindspore::lite::micro
|
|
@ -0,0 +1,42 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_POOLING_INT8_CODER_H
|
||||
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_POOLING_INT8_CODER_H
|
||||
|
||||
#include <string>
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
#include "micro/coder/opcoders/op_coder.h"
|
||||
|
||||
namespace mindspore::lite::micro {
|
||||
|
||||
// Code generator for int8 pooling operators. All work happens in DoCode;
// Prepare is a no-op because parameters are derived from tensors at
// code-generation time.
class PoolingInt8Coder final : public OperatorCoder {
 public:
  PoolingInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
                   const Model::Node *node, size_t node_index, Target target)
      : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}

  ~PoolingInt8Coder() override = default;

  // Nothing to pre-compute for pooling.
  int Prepare(CoderContext *const context) override { return RET_OK; }

  int DoCode(CoderContext *const context) override;
};
|
||||
|
||||
} // namespace mindspore::lite::micro
|
||||
|
||||
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_POOLING_INT8_CODER_H
|
|
@ -0,0 +1,58 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "micro/coder/opcoders/nnacl/int8/reshape_int8_coder.h"
|
||||
#include <vector>
|
||||
#include <string>
|
||||
#include "micro/coder/opcoders/file_collector.h"
|
||||
#include "micro/coder/log.h"
|
||||
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.h"
|
||||
|
||||
using mindspore::schema::PrimitiveType_Reshape;
|
||||
|
||||
namespace mindspore::lite::micro {
|
||||
|
||||
// Emits C source for int8 reshape: a quantized element-wise copy of
// elements_num values, requantized with the input/output quant args baked
// into a ReshapeQuantArg struct.
int ReshapeInt8Coder::DoCode(CoderContext *const context) {
  Tensor *input = OperatorCoder::input_tensors().at(kInputIndex);
  Tensor *output = OperatorCoder::output_tensors().at(kOutputIndex);
  MS_CHECK_PTR(input);
  MS_CHECK_PTR(output);
  int elements_num = input->ElementsNum();
  std::vector<QuantArg> input_quant_args = input->quant_params();
  std::vector<QuantArg> output_quant_args = output->quant_params();

  Collect(context, {"nnacl/int8/reshape_int8.h"}, {"reshape_int8.c"});
  nnacl::NNaclInt8Serializer code;
  code.precision(kPrecision);
  // Activation bounds span the full int8 range: reshape applies no activation.
  ReshapeQuantArg reshape_quant_arg = {
    {static_cast<float>(input_quant_args.at(0).scale), input_quant_args.at(0).zeroPoint},
    {static_cast<float>(output_quant_args.at(0).scale), output_quant_args.at(0).zeroPoint},
    INT8_MIN,
    INT8_MAX};
  code.CodeStruct("reshape_quant_arg", reshape_quant_arg);

  if (thread_num_ > 1) {
    // Parallel path: pack args and launch ReshapeInt8Run across threads.
    code.CodeBaseStruct("ReshapeInt8Args", "args", input, output, elements_num, thread_num_s_, "reshape_quant_arg");
    CODE_PARALLEL_FUNC("ReshapeInt8Run");
  } else {
    code.CodeFunction("Int8Reshape", input, output, elements_num, "reshape_quant_arg");
  }
  context->AppendCode(code.str());
  return RET_OK;
}

REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt8, PrimitiveType_Reshape, CPUOpCoderCreator<ReshapeInt8Coder>)
|
||||
} // namespace mindspore::lite::micro
|
|
@ -0,0 +1,37 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_RESHAPE_INT8_CODER_H_
|
||||
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_RESHAPE_INT8_CODER_H_
|
||||
|
||||
#include <vector>
|
||||
#include "micro/coder/opcoders/op_coder.h"
|
||||
|
||||
namespace mindspore::lite::micro {
|
||||
// Code generator for the int8 Reshape operator. Prepare is a no-op; all
// code emission happens in DoCode.
class ReshapeInt8Coder : public OperatorCoder {
 public:
  ReshapeInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
                   const Model::Node *node, size_t node_index, Target target)
      : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}

  ~ReshapeInt8Coder() override = default;

  // Nothing to pre-compute for reshape.
  int Prepare(CoderContext *const context) override { return RET_OK; }

  int DoCode(CoderContext *const context) override;
};
|
||||
} // namespace mindspore::lite::micro
|
||||
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_RESHAPE_INT8_CODER_H_
|
|
@ -0,0 +1,94 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
|
||||
#include "src/common/log_adapter.h"
|
||||
#include "micro/coder/log.h"
|
||||
#include "nnacl/pooling_parameter.h"
|
||||
|
||||
namespace mindspore::lite::micro::nnacl {
|
||||
|
||||
void NNaclFp32Serializer::CodeStruct(const std::string &name, const PoolingParameter &pooling_parameter) {
|
||||
CodeBaseStruct("PoolingParameter", name, pooling_parameter.op_parameter_, pooling_parameter.pool_mode_,
|
||||
pooling_parameter.round_mode_, pooling_parameter.act_type_, pooling_parameter.avg_mode_,
|
||||
pooling_parameter.global_, pooling_parameter.window_w_, pooling_parameter.window_h_,
|
||||
pooling_parameter.stride_w_, pooling_parameter.stride_h_, pooling_parameter.input_w_,
|
||||
pooling_parameter.input_w_, pooling_parameter.input_batch_, pooling_parameter.input_channel_,
|
||||
pooling_parameter.output_w_, pooling_parameter.output_h_, pooling_parameter.output_batch_,
|
||||
pooling_parameter.output_channel_, pooling_parameter.pad_u_, pooling_parameter.pad_d_,
|
||||
pooling_parameter.pad_l_, pooling_parameter.pad_r_, pooling_parameter.thread_num_, "NULL",
|
||||
pooling_parameter.quantize_);
|
||||
}
|
||||
|
||||
// Emits a BatchNormParameter brace-initializer; field order must match the
// struct declaration in nnacl/batchnorm_parameter.h.
void NNaclFp32Serializer::CodeStruct(const std::string &name, const BatchNormParameter &batch_norm_parameter) {
  CodeBaseStruct("BatchNormParameter", name, batch_norm_parameter.op_parameter_, batch_norm_parameter.epsilon_,
                 batch_norm_parameter.momentum_, batch_norm_parameter.unit_, batch_norm_parameter.units_,
                 batch_norm_parameter.channel_, batch_norm_parameter.fused_);
}
|
||||
|
||||
// Emits an ArithmeticParameter brace-initializer; fixed-size arrays are
// rendered via ToString. Field order must match nnacl's ArithmeticParameter.
void NNaclFp32Serializer::CodeStruct(const std::string &name, const ArithmeticParameter &arithmetic_parameter) {
  CodeBaseStruct("ArithmeticParameter", name, arithmetic_parameter.op_parameter_, arithmetic_parameter.broadcasting_,
                 arithmetic_parameter.ndim_, arithmetic_parameter.activation_type_,
                 ToString(arithmetic_parameter.in_shape0_), arithmetic_parameter.in_elements_num0_,
                 ToString(arithmetic_parameter.in_shape1_), arithmetic_parameter.in_elements_num1_,
                 ToString(arithmetic_parameter.out_shape_), arithmetic_parameter.out_elements_num_,
                 ToString(arithmetic_parameter.in_strides0_), ToString(arithmetic_parameter.in_strides1_),
                 ToString(arithmetic_parameter.out_strides_), ToString(arithmetic_parameter.multiples0_),
                 ToString(arithmetic_parameter.multiples1_));
}
|
||||
|
||||
// Emits a SoftmaxParameter brace-initializer into the generated source.
void NNaclFp32Serializer::CodeStruct(const std::string &name, const SoftmaxParameter &softmax_parameter) {
  CodeBaseStruct("SoftmaxParameter", name, softmax_parameter.op_parameter_, softmax_parameter.axis_,
                 ToString(softmax_parameter.input_shape_), softmax_parameter.element_size_, softmax_parameter.n_dim_);
}
|
||||
|
||||
// Emits a ConvParameter brace-initializer. The second field (conv quant
// args) is emitted as "{NULL}" — the fp32 path carries no quant info.
// Field order must match nnacl/conv_parameter.h.
void NNaclFp32Serializer::CodeStruct(const std::string &name, const ConvParameter &conv_parameter) {
  CodeBaseStruct("ConvParameter", name, conv_parameter.op_parameter_, "{NULL}", conv_parameter.kernel_h_,
                 conv_parameter.kernel_w_, conv_parameter.stride_h_, conv_parameter.stride_w_,
                 conv_parameter.dilation_h_, conv_parameter.dilation_w_, conv_parameter.pad_u_, conv_parameter.pad_d_,
                 conv_parameter.pad_l_, conv_parameter.pad_r_, conv_parameter.group_, conv_parameter.tile_num_,
                 conv_parameter.input_batch_, conv_parameter.input_h_, conv_parameter.input_w_,
                 conv_parameter.input_channel_, conv_parameter.output_batch_, conv_parameter.output_h_,
                 conv_parameter.output_w_, conv_parameter.output_channel_, conv_parameter.op_parameter_.thread_num_,
                 conv_parameter.input_unit_, conv_parameter.output_unit_, conv_parameter.act_type_);
}
|
||||
|
||||
// Emits a ScaleParameter brace-initializer into the generated source.
void NNaclFp32Serializer::CodeStruct(const std::string &name, const ScaleParameter &scale_parameter) {
  CodeBaseStruct("ScaleParameter", name, scale_parameter.op_parameter_, scale_parameter.outer_size_,
                 scale_parameter.axis_size_, scale_parameter.inner_size_, scale_parameter.axis_,
                 scale_parameter.const_scale_, scale_parameter.const_offset_);
}
|
||||
|
||||
// Emits a SliceParameter brace-initializer. The "{0}" literal zero-fills
// one field (presumably the quant arg slot — confirm against
// nnacl/slice_parameter.h).
void NNaclFp32Serializer::CodeStruct(const std::string &name, const SliceParameter &slice_parameter) {
  CodeBaseStruct("SliceParameter", name, slice_parameter.op_parameter_, ToString(slice_parameter.shape_),
                 ToString(slice_parameter.begin_), ToString(slice_parameter.end_), ToString(slice_parameter.size_),
                 "{0}", slice_parameter.param_length_);
}
|
||||
|
||||
// Emits a TileParameter brace-initializer into the generated source.
void NNaclFp32Serializer::CodeStruct(const std::string &name, const TileParameter &tile_parameter) {
  CodeBaseStruct("TileParameter", name, tile_parameter.op_parameter_, ToString(tile_parameter.multiples_),
                 ToString(tile_parameter.in_shape_), ToString(tile_parameter.out_shape_),
                 ToString(tile_parameter.in_strides_), ToString(tile_parameter.out_strides_), tile_parameter.in_dim_);
}
|
||||
|
||||
// Emits a TransposeParameter brace-initializer into the generated source.
void NNaclFp32Serializer::CodeStruct(const std::string &name, const TransposeParameter &transpose_parameter) {
  CodeBaseStruct("TransposeParameter", name, transpose_parameter.op_parameter_, ToString(transpose_parameter.perm_),
                 transpose_parameter.conjugate_, ToString(transpose_parameter.strides_),
                 ToString(transpose_parameter.out_strides_), transpose_parameter.num_axes_,
                 transpose_parameter.data_size_);
}
|
||||
|
||||
} // namespace mindspore::lite::micro::nnacl
|
|
@ -0,0 +1,51 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MICRO_MICRO_CODER_OPCODERS_SERIALIZERS_NNACL_FP32_SERIALIZER_H_
|
||||
#define MICRO_MICRO_CODER_OPCODERS_SERIALIZERS_NNACL_FP32_SERIALIZER_H_
|
||||
#include <string>
|
||||
#include <sstream>
|
||||
#include "micro/coder/opcoders/serializers/serializer.h"
|
||||
#include "nnacl/batchnorm_parameter.h"
|
||||
#include "nnacl/fp32/arithmetic_fp32.h"
|
||||
#include "nnacl/conv_parameter.h"
|
||||
#include "nnacl/matmul_parameter.h"
|
||||
#include "nnacl/scale.h"
|
||||
#include "nnacl/slice_parameter.h"
|
||||
#include "nnacl/base/tile_base.h"
|
||||
#include "nnacl/fp32/transpose_fp32.h"
|
||||
#include "nnacl/pooling_parameter.h"
|
||||
#include "nnacl/softmax_parameter.h"
|
||||
namespace mindspore::lite::micro::nnacl {
|
||||
|
||||
// Serializer that renders nnacl fp32 parameter structs as C
// brace-initializers in the generated inference source. Each CodeStruct
// overload emits one named static struct definition.
class NNaclFp32Serializer : public Serializer {
 public:
  NNaclFp32Serializer() = default;
  ~NNaclFp32Serializer() = default;
  void CodeStruct(const std::string &name, const PoolingParameter &pooling_parameter);
  void CodeStruct(const std::string &name, const SoftmaxParameter &softmax_parameter);
  void CodeStruct(const std::string &name, const BatchNormParameter &batch_norm_parameter);
  void CodeStruct(const std::string &name, const ArithmeticParameter &arithmetic_parameter);
  void CodeStruct(const std::string &name, const ConvParameter &conv_parameter);
  void CodeStruct(const std::string &name, const MatMulParameter &mat_mul_parameter);
  void CodeStruct(const std::string &name, const ScaleParameter &scale_parameter);
  void CodeStruct(const std::string &name, const SliceParameter &slice_parameter);
  void CodeStruct(const std::string &name, const TileParameter &tile_parameter);
  void CodeStruct(const std::string &name, const TransposeParameter &transpose_parameter);
};
|
||||
|
||||
} // namespace mindspore::lite::micro::nnacl
|
||||
#endif  // MICRO_MICRO_CODER_OPCODERS_SERIALIZERS_NNACL_FP32_SERIALIZER_H_
|
|
@ -0,0 +1,119 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.h"
|
||||
#include <string>
|
||||
#include "src/common/log_adapter.h"
|
||||
#include "micro/coder/log.h"
|
||||
|
||||
namespace mindspore::lite::micro::nnacl {
|
||||
// Emits an ArithmeticParameter brace-initializer; mirrors the fp32
// serializer's overload. Field order must match nnacl's struct.
void NNaclInt8Serializer::CodeStruct(const std::string &name, const ArithmeticParameter &arithmetic_parameter) {
  CodeBaseStruct("ArithmeticParameter", name, arithmetic_parameter.op_parameter_, arithmetic_parameter.broadcasting_,
                 arithmetic_parameter.ndim_, arithmetic_parameter.activation_type_,
                 ToString(arithmetic_parameter.in_shape0_), arithmetic_parameter.in_elements_num0_,
                 ToString(arithmetic_parameter.in_shape1_), arithmetic_parameter.in_elements_num1_,
                 ToString(arithmetic_parameter.out_shape_), arithmetic_parameter.out_elements_num_,
                 ToString(arithmetic_parameter.in_strides0_), ToString(arithmetic_parameter.in_strides1_),
                 ToString(arithmetic_parameter.out_strides_), ToString(arithmetic_parameter.multiples0_),
                 ToString(arithmetic_parameter.multiples1_));
}
|
||||
|
||||
void NNaclInt8Serializer::CodeStruct(const std::string &name, const PoolingParameter &pooling_parameter) {
|
||||
std::string quant_name = name + "_quant";
|
||||
std::string in_quant_name = quant_name + "_in";
|
||||
std::string out_quant_name = quant_name + "_out";
|
||||
|
||||
MS_CHECK_PTR_IF_NULL(pooling_parameter.quant_args_);
|
||||
::QuantArg *in_quant_args = pooling_parameter.quant_args_[0];
|
||||
::QuantArg *out_quant_args = pooling_parameter.quant_args_[1];
|
||||
MS_CHECK_PTR_IF_NULL(in_quant_args);
|
||||
MS_CHECK_PTR_IF_NULL(out_quant_args);
|
||||
|
||||
code << "static QuantArg " << in_quant_name << " = " << *out_quant_args << ";\n";
|
||||
code << "static QuantArg " << out_quant_name << " = " << *out_quant_args << ";\n";
|
||||
|
||||
code << "static QuantArg *" << quant_name << "[2] = {"
|
||||
<< " &" << in_quant_name << ", "
|
||||
<< " &" << out_quant_name << "};\n";
|
||||
|
||||
CodeBaseStruct("PoolingParameter", name, pooling_parameter.op_parameter_, pooling_parameter.pool_mode_,
|
||||
pooling_parameter.round_mode_, pooling_parameter.act_type_, pooling_parameter.avg_mode_,
|
||||
pooling_parameter.global_, pooling_parameter.window_w_, pooling_parameter.window_h_,
|
||||
pooling_parameter.stride_w_, pooling_parameter.stride_h_, pooling_parameter.input_w_,
|
||||
pooling_parameter.input_h_, pooling_parameter.input_batch_, pooling_parameter.input_channel_,
|
||||
pooling_parameter.output_w_, pooling_parameter.output_h_, pooling_parameter.output_batch_,
|
||||
pooling_parameter.output_channel_, pooling_parameter.pad_u_, pooling_parameter.pad_d_,
|
||||
pooling_parameter.pad_l_, pooling_parameter.pad_r_, pooling_parameter.op_parameter_.thread_num_,
|
||||
quant_name, pooling_parameter.quantize_);
|
||||
}
|
||||
|
||||
// Emits a SoftmaxParameter brace-initializer into the generated source.
void NNaclInt8Serializer::CodeStruct(const std::string &name, const SoftmaxParameter &softmax_parameter) {
  CodeBaseStruct("SoftmaxParameter", name, softmax_parameter.op_parameter_, softmax_parameter.axis_,
                 ToString(softmax_parameter.input_shape_), softmax_parameter.element_size_, softmax_parameter.n_dim_);
}
|
||||
|
||||
// Emits a SoftmaxQuantArg brace-initializer (in/out quant args, activation
// clamp bounds, and the fixed-point multiplier/shift pair).
void NNaclInt8Serializer::CodeStruct(const std::string &name, const SoftmaxQuantArg &softmax_quant_parameter) {
  CodeBaseStruct("SoftmaxQuantArg", name, softmax_quant_parameter.in_quant_args_,
                 softmax_quant_parameter.out_quant_arg_, softmax_quant_parameter.output_activation_min_,
                 softmax_quant_parameter.output_activation_max_, softmax_quant_parameter.output_multiplier_,
                 softmax_quant_parameter.shift_left_, softmax_quant_parameter.shift_right_);
}
|
||||
|
||||
// Emits a ConcatParameter and its supporting arrays (per-input quant args,
// per-input shapes, output shape) as static globals, then the parameter
// struct referencing them.
void NNaclInt8Serializer::CodeStruct(const std::string &name, const ConcatParameter &concat_parameter,
                                     int in_tensor_count, int in_shape, int out_shape) {
  std::string quant_arg_name = name + "_quant_arg";
  std::string in_args_name = quant_arg_name + "_in_args";
  std::string input_shapes_name = name + "_input_shapes";
  std::string output_shapes_name = name + "_output_shapes";

  CodeArray(in_args_name, concat_parameter.quant_arg_.in_args_, in_tensor_count, false);
  CodeBaseStruct("ConcatQuantArg", quant_arg_name, in_args_name, concat_parameter.quant_arg_.out_args_,
                 concat_parameter.quant_arg_.output_activation_min_,
                 concat_parameter.quant_arg_.output_activation_max_);

  auto get_shape_name = [&input_shapes_name](int i) { return input_shapes_name + "_" + std::to_string(i); };
  // input_shape: one array per input tensor, each of length in_shape
  for (int i = 0; i < in_tensor_count; ++i) {
    CodeArray(get_shape_name(i), concat_parameter.input_shapes_[i], in_shape);
  }

  // Pointer array over the per-input shape arrays. The trailing comma in
  // the emitted initializer list is valid C.
  code << "const int *" << input_shapes_name << "[] = {";
  for (int i = 0; i < in_tensor_count; ++i) {
    code << get_shape_name(i) << " ,";
  }
  code << "};\n";
  // output_shape
  CodeArray(output_shapes_name, concat_parameter.output_shapes_, out_shape, false);

  CodeBaseStruct("ConcatParameter", name, concat_parameter.op_parameter_, quant_arg_name, concat_parameter.axis_,
                 concat_parameter.thread_count_, concat_parameter.input_num_, input_shapes_name, output_shapes_name,
                 concat_parameter.after_axis_size, concat_parameter.count_unit_);
}
|
||||
|
||||
// Emits a ReduceQuantArg brace-initializer: in/out scale+zp followed by
// the fixed-point multiplier/shift triples for each reduce mode.
void NNaclInt8Serializer::CodeStruct(const std::string &name, const ReduceQuantArg &reduce_quant_arg) {
  CodeBaseStruct(
    "ReduceQuantArg", name, reduce_quant_arg.in_scale_, reduce_quant_arg.in_zp_, reduce_quant_arg.out_scale_,
    reduce_quant_arg.out_zp_, reduce_quant_arg.in_out_multiplier_, reduce_quant_arg.in_out_left_shift_,
    reduce_quant_arg.in_out_right_shift_, reduce_quant_arg.mean_multiplier_, reduce_quant_arg.mean_left_shift_,
    reduce_quant_arg.mean_right_shift_, reduce_quant_arg.prod_multiplier_, reduce_quant_arg.prod_left_shift_,
    reduce_quant_arg.prod_right_shift_, reduce_quant_arg.sum_square_multiplier_,
    reduce_quant_arg.sum_square_left_shift_, reduce_quant_arg.sum_square_right_shift_);
}
|
||||
// Emits a ReshapeQuantArg brace-initializer (in/out quant args plus
// activation clamp bounds).
void NNaclInt8Serializer::CodeStruct(const std::string &name, const ReshapeQuantArg &reshape_quant_arg) {
  CodeBaseStruct("ReshapeQuantArg", name, reshape_quant_arg.in_args_, reshape_quant_arg.out_args_,
                 reshape_quant_arg.output_activation_min_, reshape_quant_arg.output_activation_max_);
}
|
||||
|
||||
} // namespace mindspore::lite::micro::nnacl
|
|
@ -0,0 +1,51 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
#ifndef MICRO_MICRO_CODER_OPCODERS_SERIALIZERS_NNACL_INT8_SERIALIZER_H_
|
||||
#define MICRO_MICRO_CODER_OPCODERS_SERIALIZERS_NNACL_INT8_SERIALIZER_H_
|
||||
#include <ostream>
|
||||
#include <string>
|
||||
#include "nnacl/pooling_parameter.h"
|
||||
#include "nnacl/softmax_parameter.h"
|
||||
#include "micro/coder/opcoders/serializers/serializer.h"
|
||||
#include "nnacl/int8/add_int8.h"
|
||||
#include "nnacl/int8/arithmetic_int8.h"
|
||||
#include "nnacl/conv_parameter.h"
|
||||
#include "nnacl/matmul_parameter.h"
|
||||
#include "nnacl/int8/concat_int8.h"
|
||||
#include "nnacl/int8/quantize.h"
|
||||
#include "nnacl/reshape_parameter.h"
|
||||
|
||||
namespace mindspore::lite::micro::nnacl {
|
||||
|
||||
// Serializer that renders nnacl int8 parameter/quant structs as C
// brace-initializers in the generated inference source. Each CodeStruct
// overload emits one named static struct definition (some also emit
// supporting quant-arg/shape arrays).
class NNaclInt8Serializer : public Serializer {
 public:
  NNaclInt8Serializer() = default;
  ~NNaclInt8Serializer() = default;
  void CodeStruct(const std::string &name, const ConvParameter &conv_parameter);
  void CodeStruct(const std::string &name, const MatMulParameter &matmul_parameter);
  void CodeStruct(const std::string &name, const AddQuantParameter &add_quant_parameter);
  void CodeStruct(const std::string &name, const ArithmeticParameter &arithmetic_parameter);
  void CodeStruct(const std::string &name, const PoolingParameter &pooling_parameter);
  void CodeStruct(const std::string &name, const SoftmaxParameter &softmax_parameter);
  void CodeStruct(const std::string &name, const SoftmaxQuantArg &softmax_quant_parameter);
  // Concat additionally needs the input-tensor count and shape lengths to
  // size the emitted arrays.
  void CodeStruct(const std::string &name, const ConcatParameter &concat_parameter, int input_tensors, int in_shape,
                  int out_shape);
  void CodeStruct(const std::string &name, const ReduceQuantArg &reduce_quant_arg);
  void CodeStruct(const std::string &name, const ReshapeQuantArg &reshape_quant_arg);
};
|
||||
|
||||
} // namespace mindspore::lite::micro::nnacl
|
||||
#endif // MICRO_MICRO_CODER_OPCODERS_SERIALIZERS_NNACL_INT8_SERIALIZER_H_
|
|
@ -0,0 +1,65 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef LITE_MICRO_CODER_OPCODERS_SERIALIZERS_NNACL_STREAM_UTILS_H_
|
||||
#define LITE_MICRO_CODER_OPCODERS_SERIALIZERS_NNACL_STREAM_UTILS_H_
|
||||
#include <sstream>
|
||||
#include <string>
|
||||
#include "nnacl/pooling_parameter.h"
|
||||
#include "nnacl/softmax_parameter.h"
|
||||
#include "nnacl/int8/quantize.h"
|
||||
|
||||
namespace mindspore::lite::micro {
|
||||
|
||||
// Render a nnacl ::QuantArg as a C brace-initializer: "{scale, zp}".
inline std::ostream &operator<<(std::ostream &os, const ::QuantArg &quant_arg) {
  const float scale = static_cast<float>(quant_arg.scale_);
  os << "{" << scale << ", " << quant_arg.zp_ << "}";
  return os;
}
|
||||
|
||||
inline std::ostream &operator<<(std::ostream &code, const OpParameter &tile) {
|
||||
code << "{ \"\""
|
||||
<< ", " << tile.type_ << ", " << tile.thread_num_ << "}";
|
||||
return code;
|
||||
}
|
||||
|
||||
inline std::ostream &operator<<(std::ostream &code, PoolMode pool_mode) {
|
||||
code << "(PoolMode)"
|
||||
<< "(" << static_cast<int>(pool_mode) << ")";
|
||||
return code;
|
||||
}
|
||||
|
||||
inline std::ostream &operator<<(std::ostream &code, RoundMode round_mode) {
|
||||
code << "(RoundMode)"
|
||||
<< "(" << static_cast<int>(round_mode) << ")";
|
||||
return code;
|
||||
}
|
||||
|
||||
inline std::ostream &operator<<(std::ostream &code, ActType act_type) {
|
||||
code << "(ActType)"
|
||||
<< "(" << static_cast<int>(act_type) << ")";
|
||||
return code;
|
||||
}
|
||||
|
||||
inline std::ostream &operator<<(std::ostream &code, DataOrder data_order) {
|
||||
if (data_order == RowMajor) {
|
||||
code << "RowMajor";
|
||||
} else {
|
||||
code << "ColMajor";
|
||||
}
|
||||
return code;
|
||||
}
|
||||
} // namespace mindspore::lite::micro
|
||||
#endif // LITE_MICRO_CODER_OPCODERS_SERIALIZERS_NNACL_STREAM_UTILS_H_
|
|
@ -22,6 +22,7 @@
|
|||
#include <sstream>
|
||||
#include "micro/coder/utils/print_utils.h"
|
||||
#include "micro/coder/allocator/allocator.h"
|
||||
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_stream_utils.h"
|
||||
|
||||
namespace mindspore::lite::micro {
|
||||
|
||||
|
|
Loading…
Reference in New Issue