!17765 fix bot engine cxx

From: @yangjie159
Reviewed-by: @jpc_chenjianping, @wangchengyuan
Signed-off-by: @wangchengyuan
Committed by mindspore-ci-bot on 2021-06-05 05:34:42 +08:00 (via Gitee)
commit a65f903fad
28 changed files with 27 additions and 57 deletions
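
The hunks below all apply the same set of C++ lint fixes flagged by the bot: magic numbers are replaced with named constants (DIMENSION_2D/3D/4D/6D/8D, kLeftShift, kBiasIndex, kInputSize1), file-scope constants move inside the namespace and drop the redundant "static", and the remaining one-line deletions appear to be stray blank lines that the rendered hunks do not show. A minimal sketch of the literal-to-constant pattern follows; the namespace, function, and constant names in it are illustrative only and are not taken from the MindSpore sources.

// Illustrative sketch of the lint-fix pattern (hypothetical names, not the
// actual MindSpore lite micro code).
#include <cstddef>
#include <cstdio>

namespace example {
// After the fix: constants are named, live inside the namespace, and do not
// carry a redundant "static" (a namespace-scope constexpr variable already
// has internal linkage).
constexpr int kLeftShift = 20;
constexpr size_t kBiasInputCount = 3;

int Prepare(size_t input_count) {
  // Before the fix this read: if (input_count != 3) ... and left_shift_ = 20;
  if (input_count != kBiasInputCount) {
    return -1;
  }
  return kLeftShift;
}
}  // namespace example

int main() {
  std::printf("%d\n", example::Prepare(3));  // prints 20
  return 0;
}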

@@ -21,7 +21,6 @@
using mindspore::schema::PrimitiveType_Softmax;
namespace mindspore::lite::micro::cmsis {
int SoftMaxInt8Coder::Prepare(CoderContext *const context) {
SoftmaxBaseCoder::Init();
@@ -51,7 +50,7 @@ int SoftMaxInt8Coder::Prepare(CoderContext *const context) {
const int trailing_dim = static_cast<int>(input_tensor_->shape().size()) - 1;
const int dims_count = input_tensor_->shape().size();
-MS_CHECK_TRUE(0 <= trailing_dim && trailing_dim < dims_count, "trailing_dim should be in [0, dims_count)");
+MS_CHECK_TRUE(trailing_dim >= 0 && trailing_dim < dims_count, "trailing_dim should be in [0, dims_count)");
num_rows_ = 1;
for (int i = 0; i < dims_count; ++i) {
num_rows_ *= (i == trailing_dim) ? 1 : input_tensor_->DimensionSize(i);
@@ -77,11 +76,9 @@ int SoftMaxInt8Coder::DoCode(CoderContext *const context) {
"arm_softmax_s8.c",
});
code.CodeFunction("arm_softmax_s8", input_tensor_, num_rows_, row_size_, mult_, shift_, diff_min_, output_tensor_);
MS_LOG(INFO) << "SoftMaxInt8Coder has been called";
context->AppendCode(code.str());
return RET_OK;
}
REG_OPERATOR_CODER(kARM32M, kNumberTypeInt8, PrimitiveType_Softmax, CPUOpCoderCreator<SoftMaxInt8Coder>)
} // namespace mindspore::lite::micro::cmsis

@@ -19,9 +19,9 @@
#include <vector>
#include "coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
-static constexpr int kPerTensor = 1;
-static constexpr size_t kPerBatch = 3;
namespace mindspore::lite::micro::nnacl {
+constexpr int kPerTensor = 1;
+constexpr size_t kPerBatch = 3;
void Dequant::set_de_quant_buffer_str(const std::string &dequant_buffer_str) {
de_quant_buffer_str_ = "(float *)(" + dequant_buffer_str + ")";

@@ -24,7 +24,6 @@
using mindspore::schema::PrimitiveType_Activation;
namespace mindspore::lite::micro::nnacl {
int ActivationFP32Coder::DoCode(CoderContext *const context) {
// attribute
auto *activation_parameter = reinterpret_cast<ActivationParameter *>(parameter_);
@@ -32,7 +31,6 @@ int ActivationFP32Coder::DoCode(CoderContext *const context) {
MS_CHECK_TRUE(thread_num_ > 0, "thread_num_ <= 0");
int stride = UP_DIV(length, thread_num_);
int count = MSMIN(stride, length - stride * kDefaultTaskId);
Collect(context,
{
"nnacl/fp32/activation_fp32.h",

@@ -21,7 +21,6 @@
using mindspore::schema::PrimitiveType_AddN;
namespace mindspore::lite::micro::nnacl {
int AddNFP32Coder::DoCode(CoderContext *const context) {
Tensor *input0 = input_tensors_.at(kInputIndex);
Tensor *input1 = input_tensors_.at(1);
@@ -49,5 +48,4 @@ int AddNFP32Coder::DoCode(CoderContext *const context) {
}
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_AddN, CPUOpCoderCreator<AddNFP32Coder>)
} // namespace mindspore::lite::micro::nnacl

@@ -21,7 +21,6 @@
#include "coder/log.h"
namespace mindspore::lite::micro::nnacl {
namespace {
std::string wrap_void(const std::string &a) { return "(void *)(" + a + ")"; }
std::string wrap_uint8(const std::string &a) { return "(uint8_t *)(" + a + ")"; }
@@ -370,7 +369,6 @@ int ArithmeticFP32Coder::BatchScalarCalc(int task_id, CoderContext *const contex
int out_offset = out_stride * start_batch;
arithmetic_wrapper_info_ = {offset0, stride0, offset1, stride1, out_offset, out_stride, arithmetic_func_type_};
code->CodeStruct("arithmetic_wrapper_info", arithmetic_wrapper_info_);
code->CodeStruct("arithmetic_parameter", *arithmetic_parameter_);
code->CodeFunction("BatchScalarCalc", wrap_uint8(input0_ptr_str_), wrap_uint8(input1_ptr_str_),

@@ -23,7 +23,6 @@
#include "coder/opcoders/parallel.h"
namespace mindspore::lite::micro::nnacl {
int ArithmeticSelfFP32Coder::ReSize() {
data_size_ = input_tensor_->ElementsNum();
thread_sz_count_ = MSMIN(thread_num_, static_cast<int>(data_size_));
@@ -109,5 +108,4 @@ REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Round, CPUOpCo
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Neg, CPUOpCoderCreator<ArithmeticSelfFP32Coder>)
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Erf, CPUOpCoderCreator<ArithmeticSelfFP32Coder>)
} // namespace mindspore::lite::micro::nnacl

@@ -20,7 +20,6 @@
#include "coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
namespace mindspore::lite::micro::nnacl {
using mindspore::schema::PrimitiveType_AssignAdd;
int AssignAddFP32Coder::Prepare(CoderContext *const context) { return RET_OK; }
@@ -51,5 +50,4 @@ int AssignAddFP32Coder::DoCode(CoderContext *const context) {
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_AssignAdd, CPUOpCoderCreator<AssignAddFP32Coder>)
REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt32, PrimitiveType_AssignAdd, CPUOpCoderCreator<AssignAddFP32Coder>)
} // namespace mindspore::lite::micro::nnacl

@@ -25,7 +25,6 @@
using mindspore::schema::PrimitiveType_BatchNorm;
namespace mindspore::lite::micro::nnacl {
int BatchnormFP32Coder::Init() {
auto bn_parameter = reinterpret_cast<BatchNormParameter *>(OperatorCoder::parameter_);
std::vector<int> input_shapes = input_tensor_->shape();
@@ -51,9 +50,9 @@ int BatchnormFP32Coder::DoCode(CoderContext *const context) {
MS_LOG(ERROR) << "BatchnormFP32Coder Init error";
return RET_ERROR;
}
-MS_CHECK_TRUE(input_tensors_.size() == 3, "inputs size is not equal to three");
+MS_CHECK_TRUE(input_tensors_.size() == DIMENSION_3D, "inputs size is not equal to three");
Tensor *mean_tensor = input_tensors_.at(1);
-Tensor *var_tensor = input_tensors_.at(2);
+Tensor *var_tensor = input_tensors_.at(kInputSize1);
Collect(context,
{
"nnacl/fp32/batchnorm.h",

@@ -22,7 +22,6 @@
using mindspore::schema::PrimitiveType_BiasAdd;
namespace mindspore::lite::micro::nnacl {
int BiasAddFP32Coder::Prepare(CoderContext *context) {
arithmetic_parameter_ = reinterpret_cast<ArithmeticParameter *>(parameter_);
size_t data_size = input_tensors_.at(0)->ElementsNum();

@@ -22,7 +22,6 @@
using mindspore::schema::PrimitiveType_Concat;
namespace mindspore::lite::micro::nnacl {
int ConcatFP32Coder::Prepare(CoderContext *const context) {
concat_param_ = reinterpret_cast<ConcatParameter *>(parameter_);
return ReSize();

@@ -23,7 +23,6 @@
#include "coder/opcoders/nnacl/fp32/convolution_winograd_fp32_coder.h"
using mindspore::schema::PrimitiveType_Conv2DFusion;
namespace mindspore::lite::micro::nnacl {
int ConvDelegateCoder::Prepare(CoderContext *const context) {
// Update shape info of input and output
SetInputOutputShapeInfo(reinterpret_cast<ConvParameter *>(parameter_), input_tensor_, output_tensor_);

@@ -30,7 +30,6 @@ int ConvolutionDepthwiseFP32Coder::Prepare(CoderContext *const context) {
}
int ConvolutionDepthwiseFP32Coder::InitWeightBias() {
// init weight: o, h, w, i; o == group, i == 1
auto *origin_weight = reinterpret_cast<float *>(filter_tensor_->data_c());
int channel = filter_tensor_->Batch();
size_t pack_weight_size = filter_tensor_->Batch() * filter_tensor_->Height() * filter_tensor_->Width();
@@ -70,7 +69,6 @@ int ConvolutionDepthwiseFP32Coder::DoCode(CoderContext *const context) {
{
"ConvDwFp32Row.S",
});
nnacl::NNaclFp32Serializer code;
// call the op function
code.CodeStruct("conv_parameter", *conv_param_);

@@ -143,7 +143,7 @@ int ConvolutionWinogradFP32Coder::InitWeightBias() {
float matrix_b[64];
float matrix_bt[64];
float coef = 1.0f;
-if (input_unit_ == 8) {
+if (input_unit_ == DIMENSION_8D) {
coef = 0.5f;
}
CookToomFilter(matrix_a, matrix_at, matrix_b, matrix_bt, matrix_g, matrix_gt, coef, output_unit_, kernel_unit_);
@@ -183,7 +183,7 @@ std::string ConvolutionWinogradFP32Coder::GetInputTransFunc(int input_unit) {
std::string ConvolutionWinogradFP32Coder::GetOutputTransFunc(int input_unit, int output_unit, ActType act_type) {
std::string res;
-if (input_unit == 4 && output_unit < 4) {
+if (input_unit == DIMENSION_4D && output_unit < DIMENSION_4D) {
if (act_type == ActType_Relu) {
return OutputTransFuncReluList4.at(output_unit);
} else if (act_type == ActType_Relu6) {
@@ -191,7 +191,7 @@ std::string ConvolutionWinogradFP32Coder::GetOutputTransFunc(int input_unit, int
} else {
return OutputTransFuncList4.at(output_unit);
}
-} else if (input_unit == 6 && output_unit < 6) {
+} else if (input_unit == DIMENSION_6D && output_unit < DIMENSION_6D) {
if (act_type == ActType_Relu) {
return OutputTransFuncReluList6.at(output_unit);
} else if (act_type == ActType_Relu6) {
@@ -199,7 +199,7 @@ std::string ConvolutionWinogradFP32Coder::GetOutputTransFunc(int input_unit, int
} else {
return OutputTransFuncList6.at(output_unit);
}
-} else if (input_unit == 8 && output_unit < 8) {
+} else if (input_unit == DIMENSION_8D && output_unit < DIMENSION_8D) {
if (act_type == ActType_Relu) {
return OutputTransFuncReluList8.at(output_unit);
} else if (act_type == ActType_Relu6) {

@@ -25,7 +25,6 @@
using mindspore::schema::PrimitiveType_Gather;
namespace mindspore::lite::micro::nnacl {
int GatherFP32Coder::Prepare(CoderContext *const context) { return RET_OK; }
int GatherFP32Coder::DoCode(CoderContext *context) {

@@ -43,7 +43,7 @@ int MatMulFP32BaseCoder::ReSize() {
}
int MatMulFP32BaseCoder::InitBiasData() {
-if (input_tensors_.size() == 3) {
+if (input_tensors_.size() == DIMENSION_3D) {
int max_bias_data = params_->col_align_;
bias_pack_ptr_size_ = static_cast<size_t>(max_bias_data * sizeof(float));
if (bias_tensor_->ElementsNum() == 1) {

@@ -23,7 +23,6 @@
using mindspore::schema::PrimitiveType_MatMul;
namespace mindspore::lite::micro::nnacl {
int MatMulFP32Coder::InitShapeA() {
std::vector<int> a_shape = input_tensor_->shape();
int a_shape_size = static_cast<int>(a_shape.size());
@@ -32,12 +31,12 @@ int MatMulFP32Coder::InitShapeA() {
return RET_ERROR;
}
int batch = 1;
-for (int i = 0; i < a_shape_size - 2; ++i) {
+for (int i = 0; i < a_shape_size - DIMENSION_2D; ++i) {
batch *= a_shape.at(i);
}
params_->batch = batch;
-params_->row_ = params_->a_transpose_ ? a_shape.at(a_shape_size - 1) : a_shape.at(a_shape_size - 2);
-params_->deep_ = params_->a_transpose_ ? a_shape.at(a_shape_size - 2) : a_shape.at(a_shape_size - 1);
+params_->row_ = params_->a_transpose_ ? a_shape.at(a_shape_size - 1) : a_shape.at(a_shape_size - DIMENSION_2D);
+params_->deep_ = params_->a_transpose_ ? a_shape.at(a_shape_size - DIMENSION_2D) : a_shape.at(a_shape_size - 1);
return RET_OK;
}
@@ -49,12 +48,12 @@ int MatMulFP32Coder::InitShapeB() {
return RET_ERROR;
}
int batch = 1;
-for (int i = 0; i < b_shape_size - 2; ++i) {
+for (int i = 0; i < b_shape_size - DIMENSION_2D; ++i) {
batch *= b_shape.at(i);
}
params_->batch = batch;
-params_->col_ = params_->b_transpose_ ? b_shape.at(b_shape_size - 2) : b_shape.at(b_shape_size - 1);
-params_->deep_ = params_->b_transpose_ ? b_shape.at(b_shape_size - 1) : b_shape.at(b_shape_size - 2);
+params_->col_ = params_->b_transpose_ ? b_shape.at(b_shape_size - DIMENSION_2D) : b_shape.at(b_shape_size - 1);
+params_->deep_ = params_->b_transpose_ ? b_shape.at(b_shape_size - 1) : b_shape.at(b_shape_size - DIMENSION_2D);
return RET_OK;
}

@@ -35,7 +35,7 @@ int Nchw2NhwcFP32Coder::DoCode(CoderContext *context) {
"nnacl/pack.c",
});
NNaclFp32Serializer code;
-if (input_tensor_->shape().size() == 4) {
+if (input_tensor_->shape().size() == DIMENSION_4D) {
if (input_tensor_->data_type() == kNumberTypeFloat32) {
code.CodeFunction("PackNCHWToNHWCFp32", input_tensor_, output_tensor_, output_tensor_->Batch(),
output_tensor_->Height() * output_tensor_->Width(), output_tensor_->Channel());

@@ -34,7 +34,7 @@ int Nhwc2NchwFP32Coder::DoCode(CoderContext *const context) {
});
NNaclFp32Serializer code;
-if (input_tensor_->shape().size() == 4) {
+if (input_tensor_->shape().size() == DIMENSION_4D) {
if (input_tensor_->data_type() == kNumberTypeFloat32) {
code.CodeFunction("PackNHWCToNCHWFp32", input_tensor_, output_tensor_, output_tensor_->Batch(),
output_tensor_->Height() * output_tensor_->Width(), output_tensor_->Channel());

@@ -25,7 +25,6 @@
using mindspore::schema::PrimitiveType_PadFusion;
namespace mindspore::lite::micro::nnacl {
int PadFP32Coder::Prepare(CoderContext *const context) {
pad_param_ = reinterpret_cast<PadParameter *>(parameter_);
return ReSize();

@@ -26,7 +26,6 @@ using mindspore::schema::PrimitiveType_AvgPoolFusion;
using mindspore::schema::PrimitiveType_MaxPoolFusion;
namespace mindspore::lite::micro::nnacl {
int PoolingFP32Coder::DoCode(CoderContext *const context) {
// attribute
auto pooling_parameter = reinterpret_cast<PoolingParameter *>(parameter_);

@@ -24,7 +24,6 @@
using mindspore::schema::PrimitiveType_PowFusion;
namespace mindspore::lite::micro::nnacl {
int PowerFP32Coder::DoCode(CoderContext *const context) {
scale_ = reinterpret_cast<PowerParameter *>(parameter_)->scale_;
shift_ = reinterpret_cast<PowerParameter *>(parameter_)->shift_;
@@ -37,13 +36,13 @@ int PowerFP32Coder::DoCode(CoderContext *const context) {
int len = MSMIN(stride, size - stride * kDefaultTaskId);
std::string exp_addr;
bool broadcast = true;
-if (input_tensors_.size() == 2) {
+if (input_tensors_.size() == DIMENSION_2D) {
exp_addr = allocator_->GetRuntimeAddr(filter_tensor, true);
broadcast = !(input_tensor_->shape() == filter_tensor->shape());
}
std::string cur_exp_str;
if (broadcast) {
-cur_exp_str = input_tensors_.size() == 2 ? exp_addr : "&power";
+cur_exp_str = input_tensors_.size() == DIMENSION_2D ? exp_addr : "&power";
} else {
cur_exp_str = exp_addr;
}

@@ -21,7 +21,6 @@
#include "coder/opcoders/file_collector.h"
using mindspore::schema::PrimitiveType_ReduceFusion;
namespace mindspore::lite::micro::nnacl {
int ReduceFP32Coder::Prepare(CoderContext *const context) {
MS_CHECK_RET_CODE(ReduceBaseCoder::Init(), "init failed");

@@ -23,7 +23,6 @@
using mindspore::schema::PrimitiveType_ScaleFusion;
namespace mindspore::lite::micro::nnacl {
int ScaleFP32Coder::InitScaleOffset() {
Tensor *scale_tensor = input_tensors_.at(kWeightIndex);
MS_CHECK_PTR(scale_tensor);
@@ -40,15 +39,15 @@ int ScaleFP32Coder::InitScaleOffset() {
scale_ = nullptr;
}
-if (input_tensors_.size() == 2) {
+if (input_tensors_.size() == DIMENSION_2D) {
scale_param_->const_offset_ = true;
offset_ =
reinterpret_cast<float *>(allocator_->Malloc(kNumberTypeFloat32, scale_tensor->Size(), kOfflinePackWeight));
MS_CHECK_PTR(offset_);
MS_CHECK_RET_CODE(memset_s(offset_, scale_tensor->Size(), 0, scale_tensor->Size()), "memset_s failed!");
-} else if (input_tensors_.size() == 3 && input_tensors_.at(2)->data_c() != nullptr) {
+} else if (input_tensors_.size() == DIMENSION_3D && input_tensors_.at(kBiasIndex)->data_c() != nullptr) {
scale_param_->const_offset_ = true;
-Tensor *offset_tensor = input_tensors_.at(2);
+Tensor *offset_tensor = input_tensors_.at(kBiasIndex);
MS_CHECK_PTR(offset_tensor);
offset_ =
reinterpret_cast<float *>(allocator_->Malloc(kNumberTypeFloat32, offset_tensor->Size(), kOfflinePackWeight));
@@ -98,7 +97,7 @@ int ScaleFP32Coder::CalculateParameter() {
int ScaleFP32Coder::Prepare(CoderContext *const context) {
this->scale_param_ = reinterpret_cast<ScaleParameter *>(parameter_);
-if (input_tensors_.size() < 2 || input_tensors_.size() > 3) {
+if (input_tensors_.size() < DIMENSION_2D || input_tensors_.size() > DIMENSION_3D) {
MS_LOG(ERROR) << "inputs to Scale operator should be 2 or 3, but " << input_tensors_.size() << " is given.";
return RET_ERROR;
}

@@ -23,7 +23,6 @@
using mindspore::schema::PrimitiveType_Softmax;
namespace mindspore::lite::micro::nnacl {
int SoftMaxFP32Coder::Prepare(CoderContext *const context) {
SoftmaxBaseCoder::Init();
// malloc tmp buffer
@@ -61,7 +60,6 @@ int SoftMaxFP32Coder::DoCode(CoderContext *const context) {
code.CodeFunction("memset", sum_data_, "0", sum_data_size_);
code.CodeFunction("Softmax", input_tensor_, output_tensor_, sum_data_, "&softmax_parameter");
context->AppendCode(code.str());
return RET_OK;
}

@@ -64,7 +64,6 @@ int TileFP32Coder::DoCode(CoderContext *const context) {
code.CodeStruct("tile_parameter", *tile_param_);
// call the op function
code.CodeFunction("Tile", input_tensor_, output_tensor_, "&tile_parameter");
context->AppendCode(code.str());
return RET_OK;
}

@@ -22,7 +22,6 @@
using mindspore::schema::PrimitiveType_Transpose;
namespace mindspore::lite::micro::nnacl {
int TransposeFp32Coder::Resize() {
if (input_tensors_.size() == 2) {
param_->num_axes_ = input_tensors_.at(1)->ElementsNum();
@@ -31,7 +30,7 @@ int TransposeFp32Coder::Resize() {
return RET_OK;
}
// get perm data
-MS_ASSERT(input_tensors_.size() == 2);
+MS_ASSERT(input_tensors_.size() == DIMENSION_2D);
auto perm_tensor = input_tensors_.at(1);
int *perm_data = reinterpret_cast<int *>(perm_tensor->data_c());
MS_ASSERT(perm_data != nullptr);

@@ -24,7 +24,6 @@
using mindspore::schema::PrimitiveType_Activation;
namespace mindspore::lite::micro::nnacl {
std::unique_ptr<OperatorCoder> CPUActivationINT8CoderCreator(const std::vector<Tensor *> &in_tensors,
const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index,

@@ -26,6 +26,7 @@
using mindspore::schema::PrimitiveType_AddFusion;
namespace mindspore::lite::micro::nnacl {
+constexpr int kLeftShift = 20;
int AddInt8Coder::Prepare(CoderContext *const context) {
input0 = input_tensors().at(0);
@@ -49,7 +50,7 @@ int AddInt8Coder::Init() {
const double in1_scale = input1->quant_params().front().scale;
const double out_scale = output_tensor_->quant_params().front().scale;
-para_.left_shift_ = 20;
+para_.left_shift_ = kLeftShift;
const double twice_max_input_scale = 2 * std::max(in0_scale, in1_scale);
const double in0_multiplier = in0_scale / twice_max_input_scale;
const double in1_multiplier = in1_scale / twice_max_input_scale;