Fix compiler warnings in generated code

This commit is contained in:
yangjie159 2021-03-24 16:28:35 +08:00
parent 17384bfbba
commit a54d6ca8ff
8 changed files with 39 additions and 35 deletions

View File

@ -83,9 +83,9 @@ void PrintData(void *data, size_t data_number) {
void TensorToString(tensor::MSTensor *tensor) {
printf("name: %s, ", tensor->tensor_name().c_str());
printf(", DataType: %d", tensor->data_type());
printf(", Elements: %d", tensor->ElementsNum());
printf(", Shape: [");
printf("DataType: %d, ", tensor->data_type());
printf("Elements: %d, ", tensor->ElementsNum());
printf("Shape: [");
for (auto &dim : tensor->shape()) {
printf("%d ", dim);
}

View File

@ -111,7 +111,9 @@ int DetectionPostProcessBaseCoder::AllocateBuffer() {
}
int DetectionPostProcessBaseCoder::DoCode(CoderContext *const context) {
Collect(context, {"nnacl/detection_post_process_parameter.h", "wrapper/base/detection_post_process_base_wrapper.h"},
Collect(context,
{"nnacl/detection_post_process_parameter.h", "nnacl/fp32/detection_post_process_fp32.h",
"wrapper/base/detection_post_process_base_wrapper.h"},
{"detection_post_process_fp32.c", "detection_post_process_base_wrapper.c"});
Serializer code;

View File

@ -46,7 +46,8 @@ int Conv2D1x1Int8Coder::DoCode(CoderContext *const context) {
{"nnacl/int8/conv1x1_int8.h", "nnacl/common_func.h", "wrapper/int8/conv1x1_init_int8_wrapper.h",
"wrapper/int8/conv1x1_run_int8_wrapper.h"},
{"common_func.c", "pack_int8.c", "conv1x1_int8.c", "matmul_int8.c", "fixed_point.c",
"conv1x1_init_int8_wrapper.c", "conv1x1_run_int8_wrapper.c", "conv1x1_base.c"});
"conv1x1_init_int8_wrapper.c", "conv1x1_run_int8_wrapper.c", "conv1x1_base.c"},
{"MatmulInt8Opt.S"});
nnacl::NNaclInt8Serializer code;

View File

@ -190,7 +190,8 @@ int Conv2DINT8Coder::DoCode(CoderContext *const context) {
}
Collect(context,
{"nnacl/int8/conv_int8.h", "nnacl/common_func.h", "wrapper/int8/convolution_int8_wrapper.h",
"wrapper/base/common_wrapper.h", "wrapper/base/optimize_handler_wrapper.h"},
"wrapper/int8/conv_init_int8_wrapper.h", "wrapper/base/common_wrapper.h",
"wrapper/base/optimize_handler_wrapper.h"},
{"common_func.c", "pack_int8.c", "conv_int8.c", "winograd_transform.c", "matmul_int8.c", "fixed_point.c",
"convolution_int8_wrapper.c", "conv_init_int8_wrapper.c", "common_wrapper.c", "optimize_handler_wrapper.c"},
asm_files);

View File

@ -66,8 +66,8 @@ int ResizeInt8Coder::DoCode(CoderContext *const context) {
Collect(context, headers, cFiles);
nnacl::NNaclInt8Serializer code;
code.CodeArray("input_shape", input_tensor_->shape().data(), input_tensor_->shape().size(), false);
code.CodeArray("output_shape", output_tensor_->shape().data(), output_tensor_->shape().size(), false);
code.CodeArray("input_shape", input_tensor_->shape().data(), input_tensor_->shape().size(), true);
code.CodeArray("output_shape", output_tensor_->shape().data(), output_tensor_->shape().size(), true);
switch (method_) {
case static_cast<int>(schema::ResizeMethod_LINEAR): {
MS_LOG(ERROR) << "unsupported: " << schema::EnumNameResizeMethod(static_cast<schema::ResizeMethod>(method_));
@ -78,7 +78,7 @@ int ResizeInt8Coder::DoCode(CoderContext *const context) {
bool same_scale = abs(quant_out_->scale_ - quant_in_->scale_) < 1e-6;
bool align_corners = coordinate_transform_mode_ == schema::CoordinateTransformMode_ALIGN_CORNERS;
if (same_zp && same_scale) {
code.CodeBaseStruct("ResizeInt8Args", kRunArgs, input_tensor_, output_tensor_, "&input_shape", "&output_shape",
code.CodeBaseStruct("ResizeInt8Args", kRunArgs, input_tensor_, output_tensor_, "input_shape", "output_shape",
align_corners, gThreadNum);
if (support_parallel_) {
code.CodeFunction(kParallelLaunch, gThreadPool, "ResizeInt8Run", kRunArgsAddr, gThreadNum);

View File

@ -34,7 +34,7 @@ constexpr auto gThreadPool = "g_thread_pool";
// args represents the parameters required for operator to run
constexpr auto kRunArgs = "args";
constexpr auto kRunArgsAddr = "&args";
constexpr auto kRunArgsAddr = "(void *)&args";
} // namespace mindspore::lite::micro

View File

@ -62,31 +62,31 @@ void NNaclInt8Serializer::CodeStruct(const std::string &name, const ConvParamete
}
void NNaclInt8Serializer::CodeStruct(const std::string &name, const MatMulParameter &matmul_parameter) {
CodeBaseStruct("MatMulParameter", name, matmul_parameter.op_parameter_, matmul_parameter.has_bias_,
matmul_parameter.row_, matmul_parameter.col_, matmul_parameter.row_4_, matmul_parameter.row_6_,
matmul_parameter.row_12_, matmul_parameter.row_16_, matmul_parameter.row_align_,
matmul_parameter.col_4_, matmul_parameter.col_8_, matmul_parameter.col_align_, matmul_parameter.deep_,
matmul_parameter.deep_4_, matmul_parameter.deep_16_, matmul_parameter.batch,
matmul_parameter.a_transpose_, matmul_parameter.b_transpose_, matmul_parameter.a_const_,
matmul_parameter.b_const_, matmul_parameter.act_type_);
CodeBaseStruct<false>("MatMulParameter", name, matmul_parameter.op_parameter_, matmul_parameter.has_bias_,
matmul_parameter.row_, matmul_parameter.col_, matmul_parameter.row_4_, matmul_parameter.row_6_,
matmul_parameter.row_12_, matmul_parameter.row_16_, matmul_parameter.row_align_,
matmul_parameter.col_4_, matmul_parameter.col_8_, matmul_parameter.col_align_,
matmul_parameter.deep_, matmul_parameter.deep_4_, matmul_parameter.deep_16_,
matmul_parameter.batch, matmul_parameter.a_transpose_, matmul_parameter.b_transpose_,
matmul_parameter.a_const_, matmul_parameter.b_const_, matmul_parameter.act_type_);
}
void NNaclInt8Serializer::CodeStruct(const std::string &name, const AddQuantParameter &add_quant_parameter) {
CodeBaseStruct("AddQuantParameter", name, add_quant_parameter.left_shift_, add_quant_parameter.min_,
add_quant_parameter.max_, add_quant_parameter.in0_args_, add_quant_parameter.in1_args_,
add_quant_parameter.out_zp_, add_quant_parameter.out_left_shift_, add_quant_parameter.out_right_shift_,
add_quant_parameter.out_multiplier_);
CodeBaseStruct<false>("AddQuantParameter", name, add_quant_parameter.left_shift_, add_quant_parameter.min_,
add_quant_parameter.max_, add_quant_parameter.in0_args_, add_quant_parameter.in1_args_,
add_quant_parameter.out_zp_, add_quant_parameter.out_left_shift_,
add_quant_parameter.out_right_shift_, add_quant_parameter.out_multiplier_);
}
void NNaclInt8Serializer::CodeStruct(const std::string &name, const ArithmeticParameter &arithmetic_parameter) {
CodeBaseStruct("ArithmeticParameter", name, arithmetic_parameter.op_parameter_, arithmetic_parameter.broadcasting_,
arithmetic_parameter.ndim_, arithmetic_parameter.activation_type_,
ToString(arithmetic_parameter.in_shape0_), arithmetic_parameter.in_elements_num0_,
ToString(arithmetic_parameter.in_shape1_), arithmetic_parameter.in_elements_num1_,
ToString(arithmetic_parameter.out_shape_), arithmetic_parameter.out_elements_num_,
ToString(arithmetic_parameter.in_strides0_), ToString(arithmetic_parameter.in_strides1_),
ToString(arithmetic_parameter.out_strides_), ToString(arithmetic_parameter.multiples0_),
ToString(arithmetic_parameter.multiples1_));
CodeBaseStruct<false>("ArithmeticParameter", name, arithmetic_parameter.op_parameter_,
arithmetic_parameter.broadcasting_, arithmetic_parameter.ndim_,
arithmetic_parameter.activation_type_, ToString(arithmetic_parameter.in_shape0_),
arithmetic_parameter.in_elements_num0_, ToString(arithmetic_parameter.in_shape1_),
arithmetic_parameter.in_elements_num1_, ToString(arithmetic_parameter.out_shape_),
arithmetic_parameter.out_elements_num_, ToString(arithmetic_parameter.in_strides0_),
ToString(arithmetic_parameter.in_strides1_), ToString(arithmetic_parameter.out_strides_),
ToString(arithmetic_parameter.multiples0_), ToString(arithmetic_parameter.multiples1_));
}
void NNaclInt8Serializer::CodeStruct(const std::string &name, const PoolingParameter &pooling_parameter) {
@ -161,10 +161,10 @@ void NNaclInt8Serializer::CodeStruct(const std::string &name, const ConcatParame
auto get_shape_name = [&input_shapes_name](int i) { return input_shapes_name + "_" + std::to_string(i); };
// input_shape
for (int i = 0; i < in_tensor_count; ++i) {
CodeArray(get_shape_name(i), concat_parameter.input_shapes_[i], in_shape);
CodeArray(get_shape_name(i), concat_parameter.input_shapes_[i], in_shape, false);
}
code << "const int *" << input_shapes_name << "[] = {";
code << "int *" << input_shapes_name << "[] = {";
for (int i = 0; i < in_tensor_count; ++i) {
code << get_shape_name(i) << " ,";
}
@ -172,9 +172,9 @@ void NNaclInt8Serializer::CodeStruct(const std::string &name, const ConcatParame
// output_shape
CodeArray(output_shapes_name, concat_parameter.output_shapes_, out_shape, false);
CodeBaseStruct("ConcatParameter", name, concat_parameter.op_parameter_, quant_arg_name, concat_parameter.axis_,
concat_parameter.thread_count_, concat_parameter.input_num_, input_shapes_name, output_shapes_name,
concat_parameter.after_axis_size, concat_parameter.count_unit_);
CodeBaseStruct<false>("ConcatParameter", name, concat_parameter.op_parameter_, quant_arg_name, concat_parameter.axis_,
concat_parameter.thread_count_, concat_parameter.input_num_, input_shapes_name,
output_shapes_name, concat_parameter.after_axis_size, concat_parameter.count_unit_);
}
void NNaclInt8Serializer::CodeStruct(const std::string &name, const ::QuantArg &quant_arg) {

View File

@ -108,7 +108,7 @@ class Serializer {
if (is_const) {
code << "const " << type << " " << name << "[" << length << "] = {";
} else {
code << "static " << type << " " << name << "[" << length << "] = {";
code << type << " " << name << "[" << length << "] = {";
}
for (int i = 0; i < length - 1; ++i) {
code << data[i] << ", ";