forked from mindspore-Ecosystem/mindspore

fix static checking of lite ops

parent 460a708ede
commit 765afb2bb1
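The changes below are almost all const-correctness fixes: pointer parameters that are only read gain a `const` qualifier so static checkers can verify the functions never write through them. As a minimal illustration of the pattern (hypothetical function, not taken from the diff):

#include <cstddef>

// Before the fix, a read-only buffer would be declared `int *data`;
// the checker then cannot prove the function leaves it untouched.
// Adding const makes the read-only contract part of the signature.
int SumInts(const int *data, size_t len) {
  int sum = 0;
  for (size_t i = 0; i < len; ++i) {
    sum += data[i];  // only reads; `data[i] = 0;` would now fail to compile
  }
  return sum;
}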
@@ -15,7 +15,6 @@
 */

#include "nnacl/common_func.h"
#include "nnacl/quantization/fixed_point.h"

int offset(const int *shape, const int dim0, const int dim1, const int dim2, const int dim3) {
  return ((dim0 * shape[1] + dim1) * shape[2] + dim2) * shape[3] + dim3;
@@ -21,7 +21,7 @@
#include "nnacl/fp32/matmul.h"

// fp32 conv common
-void ConvFp32(float *input_data, float *packed_input, const float *packed_weight, const float *bias_data,
+void ConvFp32(const float *input_data, float *packed_input, const float *packed_weight, const float *bias_data,
              float *col_major_input, float *output_data, int task_id, ConvParameter *conv_param) {
  int kernel_h = conv_param->kernel_h_;
  int kernel_w = conv_param->kernel_w_;
@@ -70,7 +70,7 @@ void ConvFp32(float *input_data, float *packed_input, const float *packed_weight
}

// fp32 conv winograd
-void ConvWinogardFp32(float *input_data, const float *trans_weight, const float *bias_data, float *output_data,
+void ConvWinogardFp32(const float *input_data, const float *trans_weight, const float *bias_data, float *output_data,
                      TmpBufferAddress *buffer_list, int task_id, ConvParameter *conv_param, InputTransFunc in_func,
                      OutputTransFunc out_func) {
  int thread_num = conv_param->thread_num_;
@@ -34,11 +34,11 @@ extern "C" {
#endif

// fp32 convolution common (im2col+gemm)
-void ConvFp32(float *input_data, float *packed_input, const float *packed_weight, const float *bias_data,
+void ConvFp32(const float *input_data, float *packed_input, const float *packed_weight, const float *bias_data,
              float *col_major_input, float *output_data, int task_id, ConvParameter *conv_param);

// fp32 convolution winograd
-void ConvWinogardFp32(float *input_data, const float *trans_weight, const float *bias_data, float *output_data,
+void ConvWinogardFp32(const float *input_data, const float *trans_weight, const float *bias_data, float *output_data,
                      TmpBufferAddress *buffer_list, int task_id, ConvParameter *conv_param, InputTransFunc in_func,
                      OutputTransFunc out_func);
#ifdef __cplusplus
@@ -50,6 +50,7 @@ int PackDeConvWgDataFp32(float *nhwc_weight, DeConvComputeUnit *unit, ConvParame
                          DECONV_WINOGRAD_DEFAULT_UNIT, unit->h_size_);
  if (ret != NNACL_OK) {
    free(current_unit_weight);
+    current_unit_weight = NULL;
    return NNACL_ERRCODE_WINOGRAD_GENERATOR_ERROR;
  }
@@ -58,6 +59,7 @@ int PackDeConvWgDataFp32(float *nhwc_weight, DeConvComputeUnit *unit, ConvParame
  if (unit->winograd_.AT_ == NULL) {
    if (current_unit_weight != NULL) {
      free(current_unit_weight);
+      current_unit_weight = NULL;
    }
    return NNACL_NULL_PTR;
  }
@@ -68,9 +70,11 @@ int PackDeConvWgDataFp32(float *nhwc_weight, DeConvComputeUnit *unit, ConvParame
  if (unit->winograd_.BT_ == NULL) {
    if (current_unit_weight != NULL) {
      free(current_unit_weight);
+      current_unit_weight = NULL;
    }
    if (unit->winograd_.AT_ != NULL) {
      free(unit->winograd_.AT_);
+      unit->winograd_.AT_ = NULL;
    }
    return NNACL_NULL_PTR;
  }
@@ -82,12 +86,15 @@ int PackDeConvWgDataFp32(float *nhwc_weight, DeConvComputeUnit *unit, ConvParame
  if (winograd_unit_weight == NULL) {
    if (current_unit_weight != NULL) {
      free(current_unit_weight);
+      current_unit_weight = NULL;
    }
    if (unit->winograd_.AT_ != NULL) {
      free(unit->winograd_.AT_);
+      unit->winograd_.AT_ = NULL;
    }
    if (unit->winograd_.BT_ != NULL) {
      free(unit->winograd_.BT_);
+      unit->winograd_.BT_ = NULL;
    }
    return NNACL_NULL_PTR;
  }
@@ -97,6 +104,7 @@ int PackDeConvWgDataFp32(float *nhwc_weight, DeConvComputeUnit *unit, ConvParame
    /* reset weight data & info */
    tmp_kernel_plane = unit->winograd_.kh_ * unit->winograd_.kw_;
    free(current_unit_weight);
+    current_unit_weight = NULL;
    current_unit_weight = winograd_unit_weight;
    winograd_unit_weight = NULL;
  }
@@ -119,6 +127,7 @@ int PackDeConvWgDataFp32(float *nhwc_weight, DeConvComputeUnit *unit, ConvParame

  if (current_unit_weight != NULL) {
    free(current_unit_weight);
+    current_unit_weight = NULL;
  }
  return NNACL_OK;
}
@@ -332,7 +341,7 @@ void DeConvWgMerge(const float *src, float *dst, size_t src_stride, size_t dst_s
}

void _deConvWinograd(const float *tile_in, float *tile_out, float *weight_buf, float *tmp_buf, float *at_buf,
-                     float *a_mid_buf, float *trans_a_buf, bool *transfered, float *bt_buf, float *b_tmp_buf,
+                     float *a_mid_buf, float *trans_a_buf, bool *transfered, const float *bt_buf, float *b_tmp_buf,
                     int unit_size, int w_start, int h_start, ConvParameter *conv_param, DeConvParam *deconv_param) {
  int winograd_plane = unit_size * unit_size;
  if (!transfered[unit_size]) {
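The `PackDeConvWgDataFp32` hunks above also tighten the error paths: every buffer is freed exactly once and the pointer is reset afterwards. A minimal sketch of that free-then-reset discipline, with invented names (not the nnacl ones):

#include <cstdlib>

// Sketch: resetting a pointer right after free() means a later
// `if (p != NULL) free(p);` guard is safe, and static analysis can rule
// out double-free and use-after-free on every exit path.
int BuildScratch(size_t n) {
  float *a = static_cast<float *>(malloc(n * sizeof(float)));
  if (a == NULL) {
    return -1;
  }
  float *b = static_cast<float *>(malloc(n * sizeof(float)));
  if (b == NULL) {
    free(a);
    a = NULL;  // no dangling pointer survives the error path
    return -1;
  }
  /* ... fill and use a and b ... */
  free(a);
  a = NULL;
  free(b);
  b = NULL;
  return 0;
}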
@@ -22,7 +22,7 @@ void Calculate_Data(const float *input_data, float *output_data, int num, EluPar
  output_data[num] = input_data[num] < 0 ? parameter->alpha_ * expm1(input_data[num]) : input_data[num];
}

-int Elu(float *input_data, float *output_data, EluParameter *parameter, int task_id) {
+int Elu(const float *input_data, float *output_data, EluParameter *parameter, int task_id) {
  for (size_t i = task_id; i < parameter->in_size_; i += parameter->thread_num_) {
    Calculate_Data(input_data, output_data, i, parameter);
  }
@@ -29,7 +29,7 @@ typedef struct EluParameter {
#ifdef __cplusplus
extern "C" {
#endif
-int Elu(float *input_data, float *output_data, EluParameter *parameter, int task_id);
+int Elu(const float *input_data, float *output_data, EluParameter *parameter, int task_id);
#ifdef __cplusplus
}
#endif
@@ -18,7 +18,7 @@
#include <string.h>
#include "nnacl/errorcode.h"

-int GatherNd(const float *input, float *output, int *in_offset, int area, int count) {
+int GatherNd(const float *input, float *output, const int *in_offset, int area, int count) {
  int i = 0;
  for (i = 0; i < count; i++) {
    (void)memcpy(output + area * i, input + in_offset[i], area * sizeof(float));
@@ -27,7 +27,7 @@ typedef struct GatherNdParameter {
#ifdef __cplusplus
extern "C" {
#endif
-int GatherNd(const float *input, float *output, int *in_offset, int area, int count);
+int GatherNd(const float *input, float *output, const int *in_offset, int area, int count);
#ifdef __cplusplus
}
#endif
@@ -79,13 +79,13 @@ void ElementMulAcc(const float *input0, const float *input1, float *output, int
  }
}

-void UpdataState(float *cell_state, float *forget_gate, const float *input_gate, float *cell_gate, int batch,
+void UpdataState(float *cell_state, const float *forget_gate, const float *input_gate, float *cell_gate, int batch,
                 int hidden_size) {
  ElementMul(forget_gate, cell_state, cell_state, batch * hidden_size);
  ElementMulAcc(input_gate, cell_gate, cell_state, batch * hidden_size);
}

-void UpdataOutput(const float *cell_state, float *output_gate, float *hidden_state, int batch, int hidden_size) {
+void UpdataOutput(const float *cell_state, const float *output_gate, float *hidden_state, int batch, int hidden_size) {
  Tanh(cell_state, batch * hidden_size, hidden_state);
  ElementMul(hidden_state, output_gate, hidden_state, batch * hidden_size);
}
@@ -65,7 +65,7 @@ int PrepareResizeBilinear(const int *input_shape, const int *output_shape, bool
}

int ResizeBilinear(const float *input_data, float *output_data, const int *input_shape, const int *output_shape,
-                   int *y_bottoms, const int *y_tops, int *x_lefts, int *x_rights, float *y_bottom_weights,
+                   const int *y_bottoms, const int *y_tops, int *x_lefts, int *x_rights, float *y_bottom_weights,
                   float *x_left_weights, int n_h_begin, int n_h_end) {
  if (input_data == NULL || output_data == NULL || input_shape == NULL || output_shape == NULL || y_bottoms == NULL ||
      y_tops == NULL || x_lefts == NULL || x_rights == NULL || y_bottom_weights == NULL || x_left_weights == NULL) {
@@ -154,7 +154,7 @@ int ResizeBilinear(const float *input_data, float *output_data, const int *input
  return NNACL_OK;
}

-int InterpRow(const float *src_line, float *linear_output, int new_width, float *x_left_weights, int *x_lefts,
+int InterpRow(const float *src_line, float *linear_output, int new_width, float *x_left_weights, const int *x_lefts,
              const int *x_rights, int in_c) {
  int w;
  for (w = 0; w < new_width; w++) {
@@ -208,7 +208,7 @@ int InterpCol(const float *bottom_line, const float *top_line, float *output, in
}

int ResizeBilinear2(const float *input_data, float *output_data, const int *input_shape, const int *output_shape,
-                    int *y_bottoms, const int *y_tops, int *x_lefts, int *x_rights, float *y_bottom_weights,
+                    const int *y_bottoms, const int *y_tops, int *x_lefts, int *x_rights, float *y_bottom_weights,
                    float *x_left_weights, float *line0, float *line1, int n_h_begin, int n_h_end) {
  if (input_data == NULL || output_data == NULL || input_shape == NULL || output_shape == NULL || y_bottoms == NULL ||
      y_tops == NULL || x_lefts == NULL || x_rights == NULL || y_bottom_weights == NULL || x_left_weights == NULL) {
@@ -30,11 +30,11 @@ int PrepareResizeBilinear(const int *input_shape, const int *output_shape, bool
                          int *y_tops, int *x_lefts, int *x_rights, float *y_bottom_weights, float *x_left_weights);

int ResizeBilinear(const float *input_data, float *output_data, const int *input_shape, const int *output_shape,
-                   int *y_bottoms, const int *y_tops, int *x_lefts, int *x_rights, float *y_bottom_weights,
+                   const int *y_bottoms, const int *y_tops, int *x_lefts, int *x_rights, float *y_bottom_weights,
                   float *x_left_weights, int n_h_begin, int n_h_end);

int ResizeBilinear2(const float *input_data, float *output_data, const int *input_shape, const int *output_shape,
-                    int *y_bottoms, const int *y_tops, int *x_lefts, int *x_rights, float *y_bottom_weights,
+                    const int *y_bottoms, const int *y_tops, int *x_lefts, int *x_rights, float *y_bottom_weights,
                    float *x_left_weights, float *line0, float *line1, int n_h_begin, int n_h_end);

int ResizeNearestNeighbor(const float *input_data, float *output_data, const int *input_shape, const int *output_shape,
@@ -18,8 +18,8 @@
#ifdef ENABLE_ARM
#include <arm_neon.h>
#endif
-void ScaleInner(float *in_data, float *out_data, const float *scale, float *offset, int outer_start, int outer_end,
-                int axis_size, int inner_size) {
+void ScaleInner(float *in_data, float *out_data, const float *scale, const float *offset, int outer_start,
+                int outer_end, int axis_size, int inner_size) {
  for (int out = outer_start; out < outer_end; out++) {
    int out_offset = out * axis_size * inner_size;
    for (int i = 0; i < axis_size; i++) {
@@ -43,7 +43,7 @@ void ScaleInner(float *in_data, float *out_data, const float *scale, float *offs
  }
}

-void ScaleAxis(float *in_data, float *out_data, const float *scale, float *offset, int outer_start, int outer_end,
+void ScaleAxis(float *in_data, float *out_data, const float *scale, const float *offset, int outer_start, int outer_end,
               int axis_size) {
  for (int out = outer_start; out < outer_end; out++) {
    int out_offset = out * axis_size;
@@ -65,7 +65,8 @@ void ScaleAxis(float *in_data, float *out_data, const float *scale, float *offse
  }
}

-void DoScale(float *in_data, float *out_data, float *scale, float *offset, int task_id, ScaleParameter *scale_param) {
+void DoScale(float *in_data, float *out_data, const float *scale, float *offset, int task_id,
+             ScaleParameter *scale_param) {
  int outer_step = UP_DIV(scale_param->outer_size_, scale_param->op_parameter_.thread_num_);
  int outer_start = task_id * outer_step;
  int outer_end = MSMIN(outer_start + outer_step, scale_param->outer_size_);
@@ -78,8 +79,8 @@ void DoScale(float *in_data, float *out_data, float *scale, float *offset, int t
  }
}

-void ScaleInnerRelu(float *in_data, float *out_data, const float *scale, float *offset, int outer_start, int outer_end,
-                    int axis_size, int inner_size) {
+void ScaleInnerRelu(float *in_data, float *out_data, const float *scale, const float *offset, int outer_start,
+                    int outer_end, int axis_size, int inner_size) {
#ifdef ENABLE_ARM64
  float32x4_t zeros = {0, 0, 0, 0};
#endif
@@ -108,8 +109,8 @@ void ScaleInnerRelu(float *in_data, float *out_data, const float *scale, float *
  }
}

-void ScaleAxisRelu(float *in_data, float *out_data, const float *scale, float *offset, int outer_start, int outer_end,
-                   int axis_size) {
+void ScaleAxisRelu(float *in_data, float *out_data, const float *scale, const float *offset, int outer_start,
+                   int outer_end, int axis_size) {
#ifdef ENABLE_ARM64
  float32x4_t zeros = {0, 0, 0, 0};
#endif
@@ -135,7 +136,7 @@ void ScaleAxisRelu(float *in_data, float *out_data, const float *scale, float *o
  }
}

-void DoScaleRelu(float *in_data, float *out_data, float *scale, float *offset, int task_id,
+void DoScaleRelu(float *in_data, float *out_data, const float *scale, float *offset, int task_id,
                 ScaleParameter *scale_param) {
  int outer_step = UP_DIV(scale_param->outer_size_, scale_param->op_parameter_.thread_num_);
  int outer_start = task_id * outer_step;
@@ -149,8 +150,8 @@ void DoScaleRelu(float *in_data, float *out_data, float *scale, float *offset, i
  }
}

-void ScaleInnerRelu6(float *in_data, float *out_data, const float *scale, float *offset, int outer_start, int outer_end,
-                     int axis_size, int inner_size) {
+void ScaleInnerRelu6(float *in_data, float *out_data, const float *scale, const float *offset, int outer_start,
+                     int outer_end, int axis_size, int inner_size) {
#ifdef ENABLE_ARM64
  float32x4_t zeros = {0, 0, 0, 0};
  float32x4_t bounds = {6, 6, 6, 6};
@@ -180,8 +181,8 @@ void ScaleInnerRelu6(float *in_data, float *out_data, const float *scale, float
  }
}

-void ScaleAxisRelu6(float *in_data, float *out_data, const float *scale, float *offset, int outer_start, int outer_end,
-                    int axis_size) {
+void ScaleAxisRelu6(float *in_data, float *out_data, const float *scale, const float *offset, int outer_start,
+                    int outer_end, int axis_size) {
#ifdef ENABLE_ARM64
  float32x4_t zeros = {0, 0, 0, 0};
  float32x4_t bounds = {6, 6, 6, 6};
@@ -208,7 +209,7 @@ void ScaleAxisRelu6(float *in_data, float *out_data, const float *scale, float *
  }
}

-void DoScaleRelu6(float *in_data, float *out_data, float *scale, float *offset, int task_id,
+void DoScaleRelu6(float *in_data, float *out_data, const float *scale, float *offset, int task_id,
                  ScaleParameter *scale_param) {
  int outer_step = UP_DIV(scale_param->outer_size_, scale_param->op_parameter_.thread_num_);
  int outer_start = task_id * outer_step;
@@ -22,10 +22,11 @@
#ifdef __cplusplus
extern "C" {
#endif
-void DoScale(float *in_data, float *out_data, float *scale, float *offset, int task_id, ScaleParameter *scale_param);
-void DoScaleRelu(float *in_data, float *out_data, float *scale, float *offset, int task_id,
+void DoScale(float *in_data, float *out_data, const float *scale, float *offset, int task_id,
+             ScaleParameter *scale_param);
+void DoScaleRelu(float *in_data, float *out_data, const float *scale, float *offset, int task_id,
                 ScaleParameter *scale_param);
-void DoScaleRelu6(float *in_data, float *out_data, float *scale, float *offset, int task_id,
+void DoScaleRelu6(float *in_data, float *out_data, const float *scale, float *offset, int task_id,
                  ScaleParameter *scale_param);
#ifdef __cplusplus
}
@@ -16,7 +16,6 @@

#include "nnacl/fp32/softmax.h"
#include <math.h>
#include <float.h>

// output = exp(input) / reduce_sum(exp(input), axis)
void Softmax(const float *input_ptr, float *output_ptr, float *sum_data, SoftmaxParameter *parameter) {
@@ -16,7 +16,8 @@
#include "nnacl/fp32/space_to_batch.h"
#include "nnacl/arithmetic_common.h"

-void DoSpaceToBatchNHWC(const float *input, float *output, const int *block_sizes, int *in_shape, int *out_shape) {
+void DoSpaceToBatchNHWC(const float *input, float *output, const int *block_sizes, int *in_shape,
+                        const int *out_shape) {
  int out_dim0 = out_shape[0];
  int out_dim1 = out_shape[1];
  int out_dim2 = out_shape[2];
@@ -45,7 +46,8 @@ void DoSpaceToBatchNHWC(const float *input, float *output, const int *block_size
  }
}

-void DoSpaceToBatchPaddingNHWC(const float *input, float *output, int *in_shape, const int *padding, int *out_shape) {
+void DoSpaceToBatchPaddingNHWC(const float *input, float *output, int *in_shape, const int *padding,
+                               const int *out_shape) {
  int in_h = in_shape[1];
  int in_w = in_shape[2];
  int in_c = in_shape[3];
@@ -30,8 +30,9 @@ typedef struct SpaceToBatchParameter {
#ifdef __cplusplus
extern "C" {
#endif
-void DoSpaceToBatchNHWC(const float *input, float *output, const int *block_sizes, int *in_shape, int *out_shape);
-void DoSpaceToBatchPaddingNHWC(const float *input, float *output, int *in_shape, const int *padding, int *out_shape);
+void DoSpaceToBatchNHWC(const float *input, float *output, const int *block_sizes, int *in_shape, const int *out_shape);
+void DoSpaceToBatchPaddingNHWC(const float *input, float *output, int *in_shape, const int *padding,
+                               const int *out_shape);
#ifdef __cplusplus
}
#endif
@@ -18,7 +18,7 @@
#include "nnacl/errorcode.h"
#include "nnacl/op_base.h"

-int SpaceToDepthForNHWC(const float *input, float *output, int *in_shape, int *out_shape, int shape_size,
+int SpaceToDepthForNHWC(const float *input, float *output, int *in_shape, const int *out_shape, int shape_size,
                        int block_size, int h_start, int h_end) {
  if (input == NULL || output == NULL) {
    return NNACL_NULL_PTR;
@@ -24,7 +24,7 @@ typedef struct SpaceToDepthParameter {
#ifdef __cplusplus
extern "C" {
#endif
-int SpaceToDepthForNHWC(const float *input, float *output, int *in_shape, int *out_shape, int shape_size,
+int SpaceToDepthForNHWC(const float *input, float *output, int *in_shape, const int *out_shape, int shape_size,
                        int block_size, int h_start, int h_end);
#ifdef __cplusplus
}
@@ -15,7 +15,7 @@
 */
#include "nnacl/fp32/sparse_to_dense.h"

-void SparseToDense(int **sparse_indices, int *output_shape, const float *sparse_values, float default_value,
+void SparseToDense(int **sparse_indices, const int *output_shape, const float *sparse_values, float default_value,
                   float *output, bool isScalar, int index_start, int index_end, int out_width) {
  for (int i = index_start; i < index_end; i++) {
    for (int j = 0; j < out_width; j++) {
@@ -21,7 +21,7 @@
#ifdef __cplusplus
extern "C" {
#endif
-void SparseToDense(int **sparse_indices_vect, int *output_shape, const float *sparse_values, float default_value,
+void SparseToDense(int **sparse_indices_vect, const int *output_shape, const float *sparse_values, float default_value,
                   float *output, bool isScalar, int index_start, int index_end, int out_width);
#ifdef __cplusplus
}
@@ -17,7 +17,7 @@
#include "nnacl/fp32/stack.h"
#include "nnacl/arithmetic_common.h"

-size_t GetStackCopyNum(int axis, int *in_shape, size_t shape_size) {
+size_t GetStackCopyNum(int axis, const int *in_shape, size_t shape_size) {
  size_t one_input_size = 1;
  for (size_t i = 0; i < shape_size; ++i) {
    one_input_size *= in_shape[i];
@@ -68,6 +68,4 @@ void DoStackInt32(const int32_t *const *inputs, size_t input_num, int *in_shape,
  }
}

-void DoStackOneInput(const int8_t *input, int8_t *output, size_t data_size) {
-  memcpy(output, input, data_size);
-}
+void DoStackOneInput(const int8_t *input, int8_t *output, size_t data_size) { memcpy(output, input, data_size); }
@@ -15,7 +15,6 @@
 */

#include "nnacl/int8/batch_to_space_int8.h"
#include "nnacl/arithmetic_common.h"

void BatchToSpaceNoCropForNHWCInt8(const int8_t *input, int8_t *output, const int *in_shape, int out_n,
                                   const int *block, QuantArg *in_quant_arg, QuantArg *out_quant_arg) {
@@ -491,7 +491,7 @@ void ConvDw3x3Int8Pad(int8_t *output_data, const int8_t *input_data, const int16
/*conv depthwise sliding window perchannel int8 begin*/
void DepthwiseBorderPixelInt8(int8_t *dst, const int8_t *src, const int16_t *weight, const int32_t *bias, int height,
                              int width, int in_kh_step, int in_kw_step, int kernel_w, int8_t *input_zp,
-                              int32_t *out_zp, int *out_multiplier, int *left_shift, const int *right_shift,
+                              int32_t *out_zp, const int *out_multiplier, int *left_shift, const int *right_shift,
                              int32_t *acc_min, int32_t *acc_max) {
  int tmp_buffer[C8NUM];
  for (int i = 0; i < C8NUM; i++) {
@@ -528,7 +528,7 @@ void DepthwiseBorderPixelInt8(int8_t *dst, const int8_t *src, const int16_t *wei
void DepthwiseBorderInt8(int8_t *dst, const int8_t *src, const int16_t *weight, const int32_t *bias, int top,
                         int bottom, int left, int right, const ConvParameter *conv_param,
                         const SlidingWindowParam *sliding, int8_t *in_zp, int32_t *out_zp, int *out_multiplier,
-                         int *left_shift, int *right_shift, int32_t *acc_min, int32_t *acc_max) {
+                         int *left_shift, const int *right_shift, int32_t *acc_min, int32_t *acc_max) {
  int8_t *dst_h = dst + top * sliding->out_h_step_;
  for (int oh = top; oh < bottom; oh++) {
    int ih = oh * conv_param->stride_h_ - conv_param->pad_u_;
@@ -17,7 +17,7 @@
#include "nnacl/arithmetic_common.h"

void DoSpaceToBatchNHWCInt8(const int8_t *input, int8_t *output, const int *block_sizes, int *in_shape,
-                            int *out_shape) {
+                            const int *out_shape) {
  int out_dim0 = out_shape[0];
  int out_dim1 = out_shape[1];
  int out_dim2 = out_shape[2];
@@ -47,7 +47,7 @@ void DoSpaceToBatchNHWCInt8(const int8_t *input, int8_t *output, const int *bloc
}

void DoSpaceToBatchPaddingNHWCInt8(const int8_t *input, int8_t *output, int *in_shape, const int *padding,
-                                   int *out_shape, int32_t zp) {
+                                   const int *out_shape, int32_t zp) {
  int in_h = in_shape[1];
  int in_w = in_shape[2];
  int in_c = in_shape[3];
@@ -21,9 +21,10 @@
#ifdef __cplusplus
extern "C" {
#endif
-void DoSpaceToBatchNHWCInt8(const int8_t *input, int8_t *output, const int *block_sizes, int *in_shape, int *out_shape);
+void DoSpaceToBatchNHWCInt8(const int8_t *input, int8_t *output, const int *block_sizes, int *in_shape,
+                            const int *out_shape);
void DoSpaceToBatchPaddingNHWCInt8(const int8_t *input, int8_t *output, int *in_shape, const int *padding,
-                                   int *out_shape, int32_t zp);
+                                   const int *out_shape, int32_t zp);
#ifdef __cplusplus
}
#endif
@@ -54,7 +54,7 @@ void ResidueMatrix(const float *interval, float *b, int row, int col) {
  b[len - 1] = 1;
}

-int LT(float *poly_array, float *matrix_lt, int n) {
+int LT(const float *poly_array, float *matrix_lt, int n) {
  if (n > MAX_LEN) {
    return NNACL_ERR;
  }
@@ -32,7 +32,7 @@ void DiagonalPlusMatrix(const float *matrix, float *diagonal_matrix, int degree)

void ResidueMatrix(const float *interval, float *b, int row, int col);

-int LT(float *poly_array, float *matrix_lt, int n);
+int LT(const float *poly_array, float *matrix_lt, int n);

void T(const float *poly_array, float *matrix_t, int n);
@@ -18,8 +18,8 @@
#include <string.h>
#include "nnacl/errorcode.h"

-void TransposeDim2(float *in_data, float *out_data, const int *strides, int *out_strides, int *perm, int *output_shape,
-                   int h_start, int h_end) {
+void TransposeDim2(float *in_data, float *out_data, const int *strides, int *out_strides, const int *perm,
+                   int *output_shape, int h_start, int h_end) {
  const int stride0 = strides[perm[0]];
  const int stride1 = strides[perm[1]];
  const int output0 = output_shape[0];
@@ -33,8 +33,8 @@ void TransposeDim2(float *in_data, float *out_data, const int *strides, int *out
  }
}

-void TransposeDim3(float *in_data, float *out_data, const int *strides, int *out_strides, int *perm, int *output_shape,
-                   int h_start, int h_end) {
+void TransposeDim3(float *in_data, float *out_data, const int *strides, int *out_strides, const int *perm,
+                   int *output_shape, int h_start, int h_end) {
  const int stride0 = strides[perm[0]];
  const int stride1 = strides[perm[1]];
  const int stride2 = strides[perm[2]];
@@ -56,8 +56,8 @@ void TransposeDim3(float *in_data, float *out_data, const int *strides, int *out
  }
}

-void TransposeDim4(float *in_data, float *out_data, const int *strides, int *out_strides, int *perm, int *output_shape,
-                   int h_start, int h_end) {
+void TransposeDim4(float *in_data, float *out_data, const int *strides, int *out_strides, const int *perm,
+                   int *output_shape, int h_start, int h_end) {
  const int stride0 = strides[perm[0]];
  const int stride1 = strides[perm[1]];
  const int stride2 = strides[perm[2]];
@@ -88,8 +88,8 @@ void TransposeDim4(float *in_data, float *out_data, const int *strides, int *out
  }
}

-void TransposeDim5(float *in_data, float *out_data, const int *strides, int *out_strides, int *perm, int *output_shape,
-                   int h_start, int h_end) {
+void TransposeDim5(float *in_data, float *out_data, const int *strides, int *out_strides, const int *perm,
+                   int *output_shape, int h_start, int h_end) {
  const int stride0 = strides[perm[0]];
  const int stride1 = strides[perm[1]];
  const int stride2 = strides[perm[2]];
@@ -127,8 +127,8 @@ void TransposeDim5(float *in_data, float *out_data, const int *strides, int *out
  }
}

-void TransposeDims(float *in_data, float *out_data, const int *strides, int *out_strides, int *perm, int *output_shape,
-                   int h_start, int h_end, int dims, int *size, int *position) {
+void TransposeDims(float *in_data, float *out_data, const int *strides, int *out_strides, const int *perm,
+                   int *output_shape, int h_start, int h_end, int dims, int *size, int *position) {
  *(size + dims - 1) = 1;
  for (int i = dims - 1; i > 0; --i) {
    *(size + i - 1) = *(size + i) * output_shape[i];
@@ -34,16 +34,16 @@ extern "C" {
#endif
int DoTranspose(float *in_data, float *out_data, int *input_shape, int *output_shape,
                TransposeParameter *transpose_param, int h_start, int h_end, int *size, int *position);
-void TransposeDim2(float *in_data, float *out_data, const int *strides, int *out_strides, int *perm, int *output_shape,
-                   int h_start, int h_end);
-void TransposeDim3(float *in_data, float *out_data, const int *strides, int *out_strides, int *perm, int *output_shape,
-                   int h_start, int h_end);
-void TransposeDim4(float *in_data, float *out_data, const int *strides, int *out_strides, int *perm, int *output_shape,
-                   int h_start, int h_end);
-void TransposeDim5(float *in_data, float *out_data, const int *strides, int *out_strides, int *perm, int *output_shape,
-                   int h_start, int h_end);
-void TransposeDims(float *in_data, float *out_data, const int *strides, int *out_strides, int *perm, int *output_shape,
-                   int h_start, int h_end, int dims, int *size, int *position);
+void TransposeDim2(float *in_data, float *out_data, const int *strides, int *out_strides, const int *perm,
+                   int *output_shape, int h_start, int h_end);
+void TransposeDim3(float *in_data, float *out_data, const int *strides, int *out_strides, const int *perm,
+                   int *output_shape, int h_start, int h_end);
+void TransposeDim4(float *in_data, float *out_data, const int *strides, int *out_strides, const int *perm,
+                   int *output_shape, int h_start, int h_end);
+void TransposeDim5(float *in_data, float *out_data, const int *strides, int *out_strides, const int *perm,
+                   int *output_shape, int h_start, int h_end);
+void TransposeDims(float *in_data, float *out_data, const int *strides, int *out_strides, const int *perm,
+                   int *output_shape, int h_start, int h_end, int dims, int *size, int *position);
#ifdef __cplusplus
}
#endif
@@ -15,7 +15,8 @@
 */
#include "nnacl/where.h"

-void Where(bool *input, float *input1, const float *input2, float *output, WhereParameter *where_param_, int task_id) {
+void Where(bool *input, const float *input1, const float *input2, float *output, WhereParameter *where_param_,
+           int task_id) {
  for (int i = task_id; i < where_param_->number_; i += where_param_->op_parameter_.thread_num_) {
    if (input[where_param_->num_ > 1 ? i : 0] == true) {
      output[i] = input1[where_param_->num1_ > 1 ? i : 0];
@@ -30,7 +30,8 @@ typedef struct WhereParameter {
#ifdef __cplusplus
extern "C" {
#endif
-void Where(bool *input, float *input1, const float *input2, float *output, WhereParameter *where_param_, int task_id);
+void Where(bool *input, const float *input1, const float *input2, float *output, WhereParameter *where_param_,
+           int task_id);
#ifdef __cplusplus
}
#endif
@@ -75,7 +75,7 @@ static OutputTransFunc OutputTransFuncRelu6List8[] = {NULL,
                                                      OutputTransform8x6Relu6Unit,
                                                      OutputTransform8x7Relu6Unit};

-void GeneralInputTransformUnit(const float *src_data, float *dst_data, float *matrix_b, const float *matrix_bt,
+void GeneralInputTransformUnit(const float *src_data, float *dst_data, const float *matrix_b, const float *matrix_bt,
                               int src_step, int dst_step, int in_unit) {
  int len = in_unit * in_unit;
  if (len > MAX_LEN) return;
@@ -112,7 +112,7 @@ void GeneralInputTransformUnit(const float *src_data, float *dst_data, float *ma
#endif
}

-void GeneralOutputTransformUnit(const float *src_data, float *dst_data, const float *bias_data, float *matrix_a,
+void GeneralOutputTransformUnit(const float *src_data, float *dst_data, const float *bias_data, const float *matrix_a,
                                const float *matrix_at, int src_step, int dst_step, int in_unit, int out_unit) {
  int src_len = in_unit * in_unit;
  if (src_len > MAX_LEN) {
@@ -33,10 +33,10 @@ typedef void (*InputTransFunc)(const float *src_data, float *dst_data, int src_s
typedef void (*OutputTransFunc)(const float *src_data, float *dst_data, const float *bias_data, int src_step,
                                int dst_step, int out_c, int r_w, int r_h, int r_c);

-void GeneralInputTransformUnit(const float *src_data, float *dst_data, float *matrix_b, const float *matrix_bt,
+void GeneralInputTransformUnit(const float *src_data, float *dst_data, const float *matrix_b, const float *matrix_bt,
                               int src_step, int dst_step, int in_unit);

-void GeneralOutputTransformUnit(const float *src_data, float *dst_data, const float *bias_data, float *matrix_a,
+void GeneralOutputTransformUnit(const float *src_data, float *dst_data, const float *bias_data, const float *matrix_a,
                                const float *matrix_at, int src_step, int dst_step, int in_unit, int out_unit);

#define Load16Data \
@@ -85,7 +85,7 @@ std::string RealPath(const char *path) {
  return res;
}

-int CompareOutputData(const float *output_data, size_t output_size, float *correct_data, size_t data_size) {
+int CompareOutputData(const float *output_data, size_t output_size, const float *correct_data, size_t data_size) {
  if (output_size != data_size) {
    printf("compare failed, output_size %zu isn't equal to data_size %zu.\n", output_size, data_size);
    return 0;
@@ -107,7 +107,7 @@ int CompareOutputData(const float *output_data, size_t output_size, float *corre
  return 0;
}

-int CompareOutput(float *output_data, size_t output_num, std::string file_path) {
+int CompareOutput(const float *output_data, size_t output_num, std::string file_path) {
  size_t ground_truth_size;
  auto ground_truth = reinterpret_cast<float *>(mindspore::lite::ReadFile(file_path.c_str(), &ground_truth_size));
  size_t ground_truth_num = ground_truth_size / sizeof(float);
@@ -58,8 +58,8 @@ inline int WriteToBin(const std::string &file_path, void *data, size_t size) {
  return 0;
}

-int CompareOutputData(const float *output_data, size_t output_num, float *correct_data, size_t data_size);
-int CompareOutput(float *output_data, size_t output_num, std::string file_path);
+int CompareOutputData(const float *output_data, size_t output_num, const float *correct_data, size_t data_size);
+int CompareOutput(const float *output_data, size_t output_num, std::string file_path);

std::string GetAndroidPackageName();
std::string GetAndroidPackagePath();
@@ -21,7 +21,7 @@

namespace mindspore {
namespace lite {
-static float CompareOutputRelativeData(const float *output_data, float *correct_data, int data_size) {
+static float CompareOutputRelativeData(const float *output_data, const float *correct_data, int data_size) {
  float error = 0;

  // relative error
@@ -38,7 +38,7 @@ static float CompareOutputRelativeData(const float *output_data, float *correct_
  return error;
}

-int CompareRelativeOutput(float *output_data, std::string file_path) {
+int CompareRelativeOutput(const float *output_data, std::string file_path) {
  size_t output_size;
  auto ground_truth = reinterpret_cast<float *>(mindspore::lite::ReadFile(file_path.c_str(), &output_size));
  if (ground_truth == nullptr) {
@@ -53,7 +53,7 @@ int CompareRelativeOutput(float *output_data, std::string file_path) {
  return 0;
}

-float RelativeOutputError(float *output_data, std::string file_path) {
+float RelativeOutputError(const float *output_data, std::string file_path) {
  size_t output_size;
  auto ground_truth = reinterpret_cast<float *>(mindspore::lite::ReadFile(file_path.c_str(), &output_size));
  size_t output_num = output_size / sizeof(float);
@@ -20,8 +20,8 @@

namespace mindspore {
namespace lite {
-int CompareRelativeOutput(float *output_data, std::string file_path);
-float RelativeOutputError(float *output_data, std::string file_path);
+int CompareRelativeOutput(const float *output_data, std::string file_path);
+float RelativeOutputError(const float *output_data, std::string file_path);
}  // namespace lite
}  // namespace mindspore
#endif  // MINDSPORE_LITE_COMMON_FILE_UTILS_EXT_H_
@@ -37,6 +37,7 @@ int AssignAdd::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &
                  << schema::EnumNamePrimitiveType(primitive_->value.type) << "is not equal"
                  << schema::EnumNamePrimitiveType(schema::PrimitiveType_AssignAdd);
    delete this->primitive_;
    this->primitive_ = nullptr;
    return RET_ERROR;
  }
  if (this->primitive_->value.value == nullptr) {
@@ -44,6 +45,7 @@ int AssignAdd::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &
    if (this->primitive_->value.value == nullptr) {
      MS_LOG(ERROR) << "new primitiveT value failed";
      delete this->primitive_;
      this->primitive_ = nullptr;
      return RET_ERROR;
    }
  }
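The op wrappers get the analogous C++ treatment: `UnPackAttr` error paths now release `this->primitive_` (and any half-built attribute) and null the pointers before returning. A rough sketch of the shape, with hypothetical class and member names, not the actual mindspore primitive classes:

#include <iostream>
#include <new>

// Illustrative only; mirrors the delete-then-nullptr cleanup added to the
// UnPackAttr error paths in the hunks around here.
struct AttrT { int axis = 0; };

struct OpT {
  AttrT *attr_ = nullptr;
  int UnPack(bool type_ok) {
    attr_ = new (std::nothrow) AttrT();
    if (attr_ == nullptr) {
      std::cerr << "new attr failed" << std::endl;
      return -1;
    }
    if (!type_ok) {
      delete attr_;     // release the partially built state...
      attr_ = nullptr;  // ...and null it so nothing can double-delete it
      return -1;
    }
    return 0;
  }
};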
@@ -43,6 +43,7 @@ int BinaryCrossEntropyGrad::UnPackAttr(const Primitive &prim, const std::vector<
                  << schema::EnumNamePrimitiveType(primitive_->value.type) << "is not equal"
                  << schema::EnumNamePrimitiveType(schema::PrimitiveType_BinaryCrossEntropyGrad);
    delete this->primitive_;
    this->primitive_ = nullptr;
    return RET_ERROR;
  }
  if (this->primitive_->value.value == nullptr) {
@@ -58,6 +59,8 @@ int BinaryCrossEntropyGrad::UnPackAttr(const Primitive &prim, const std::vector<
      MS_LOG(ERROR) << "get reduction failed!";
      delete this->primitive_;
      delete attr;
      this->primitive_ = nullptr;
      attr = nullptr;
      return RET_ERROR;
    } else {
      reduction = GetValue<string>(prim.GetAttr("reduction"));
@@ -32,6 +32,7 @@ int ControlDepend::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePt
  if (this->primitive_->value.type != schema::PrimitiveType_ControlDepend) {
    MS_LOG(ERROR) << "primitive_ type is error:" << this->primitive_->value.type;
    delete this->primitive_;
    this->primitive_ = nullptr;
    return RET_ERROR;
  }
  if (this->primitive_->value.value == nullptr) {
@@ -39,6 +40,7 @@ int ControlDepend::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePt
    if (attr == nullptr) {
      MS_LOG(ERROR) << "attr is nullptr";
      delete this->primitive_;
      this->primitive_ = nullptr;
      return RET_ERROR;
    }
    this->primitive_->value.value = attr;
@@ -39,12 +39,14 @@ int ExpandDims::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr>
  if (this->primitive_->value.type != schema::PrimitiveType_ExpandDims) {
    MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type;
    delete this->primitive_;
    this->primitive_ = nullptr;
    return RET_ERROR;
  }
  if (this->primitive_->value.value == nullptr) {
    auto attr = new (std::nothrow) schema::ExpandDimsT();
    if (attr == nullptr) {
      delete this->primitive_;
      this->primitive_ = nullptr;
      MS_LOG(ERROR) << "new primitiveT value failed";
      return RET_ERROR;
    }
@@ -57,6 +59,8 @@ int ExpandDims::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr>
      MS_LOG(ERROR) << "input axis is not value node.";
      delete this->primitive_;
      delete attr;
      this->primitive_ = nullptr;
      attr = nullptr;
      return RET_ERROR;
    }
    this->primitive_->value.value = attr;
@@ -44,6 +44,7 @@ int Gather::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inp
    MS_LOG(ERROR) << "Gather primitive value type : " << schema::EnumNamePrimitiveType(primitive_->value.type)
                  << "is not equal" << schema::EnumNamePrimitiveType(schema::PrimitiveType_Gather);
    delete this->primitive_;
    this->primitive_ = nullptr;
    return RET_ERROR;
  }
  if (this->primitive_->value.value == nullptr) {
@@ -52,6 +53,8 @@ int Gather::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inp
      MS_LOG(ERROR) << "new primitive value.value error";
      delete this->primitive_;
      delete gather_attr;
      this->primitive_ = nullptr;
      gather_attr = nullptr;
      return RET_ERROR;
    }
    if (inputs[2]->isa<ValueNode>()) {
@@ -37,6 +37,7 @@ int OnesLike::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &i
                  << schema::EnumNamePrimitiveType(primitive_->value.type) << "is not equal"
                  << schema::EnumNamePrimitiveType(schema::PrimitiveType_OnesLike);
    delete this->primitive_;
    this->primitive_ = nullptr;
    return RET_ERROR;
  }
  if (this->primitive_->value.value == nullptr) {
@@ -44,6 +45,7 @@ int OnesLike::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &i
    if (this->primitive_->value.value == nullptr) {
      MS_LOG(ERROR) << "new primitiveT value failed";
      delete this->primitive_;
      this->primitive_ = nullptr;
      return RET_ERROR;
    }
  }
@@ -35,6 +35,8 @@ OpParameter *PopulateLayerNormParameter(const mindspore::lite::PrimitiveC *primi
  layer_norm_parameter->normalized_shape_ = reinterpret_cast<int *>(malloc(normalized_shape.size() * sizeof(int)));
  if (layer_norm_parameter->normalized_shape_ == nullptr) {
    MS_LOG(ERROR) << "malloc layer_norm_parameter->normalized_shape_ failed.";
    free(layer_norm_parameter);
    layer_norm_parameter = nullptr;
    return nullptr;
  }
  for (size_t i = 0; i < normalized_shape.size(); i++) {
@@ -43,6 +43,7 @@ int Power::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inpu
  if (this->primitive_->value.type != schema::PrimitiveType_Power) {
    MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type;
    delete this->primitive_;
    this->primitive_ = nullptr;
    return RET_ERROR;
  }
  if (this->primitive_->value.value == nullptr) {
@@ -50,6 +51,7 @@ int Power::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inpu
    if (attr == nullptr) {
      MS_LOG(ERROR) << "new primitiveT value failed";
      delete this->primitive_;
      this->primitive_ = nullptr;
      return RET_ERROR;
    }
@@ -63,9 +63,7 @@ int Resize::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inp
  } else if (prim.instance_name() == "ResizeBilinear") {
    attr->method = schema::ResizeMethod_LINEAR;
  } else {
-    if (attr != nullptr) {
-      delete attr;
-    }
+    delete attr;
    MS_LOG(ERROR) << "wrong resize type";
    return RET_ERROR;
  }
@@ -41,6 +41,7 @@ int Sub::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs
  if (this->primitive_->value.type != schema::PrimitiveType_Sub) {
    MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type;
    delete this->primitive_;
    this->primitive_ = nullptr;
    return RET_ERROR;
  }
  if (this->primitive_->value.value == nullptr) {
@@ -48,6 +49,7 @@ int Sub::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs
    if (attr == nullptr) {
      MS_LOG(ERROR) << "new primitiveT value failed";
      delete this->primitive_;
      this->primitive_ = nullptr;
      return RET_ERROR;
    }
    // todo: confirm the activationType
@@ -41,6 +41,7 @@ int UnsortedSegmentSum::UnPackAttr(const Primitive &prim, const std::vector<AnfN
                  << schema::EnumNamePrimitiveType(primitive_->value.type) << "is not equal"
                  << schema::EnumNamePrimitiveType(schema::PrimitiveType_UnsortedSegmentSum);
    delete this->primitive_;
    this->primitive_ = nullptr;
    return RET_ERROR;
  }
  if (this->primitive_->value.value == nullptr) {
@@ -171,15 +171,15 @@ class ArithmeticCPUKernel : public LiteKernel {

 private:
  int BroadcastRun(void *input0, void *input1, void *output, int dim, int out_count, int out_thread_stride);
-  int break_pos_;
-  int outside_;
-  int thread_count_;
-  ArithmeticParameter *arithmeticParameter_;
+  int break_pos_ = 0;
+  int outside_ = 0;
+  int thread_count_ = 1;
+  ArithmeticParameter *arithmeticParameter_ = nullptr;
  ArithmeticRun arithmetic_run_ = nullptr;
  ArithmeticOptRun arithmetic_opt_run_ = nullptr;
  ArithmeticIntRun arithmetic_run_int_ = nullptr;
  ArithmeticOptIntRun arithmetic_opt_run_int_ = nullptr;
-  LiteDataType data_type_;
+  LiteDataType data_type_ = kDataTypeFloat;
};
}  // namespace mindspore::kernel
#endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_ARITHMETIC_H_
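`ArithmeticCPUKernel` additionally picks up in-class default member initializers, so every field has a defined value before any constructor body runs. A tiny hypothetical sketch of the idiom (invented struct, not the kernel itself):

// Hypothetical sketch of in-class default member initializers: fields are
// well-defined even on constructor paths that never assign them, which is
// what the static check on ArithmeticCPUKernel's members was after.
struct KernelState {
  int break_pos_ = 0;
  int thread_count_ = 1;
  float *buffer_ = nullptr;  // never uninitialized or dangling
};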
@@ -68,22 +68,24 @@ void DeConvolutionWinogradCPUKernel::FreeResizeBuf() {
}

void DeConvolutionWinogradCPUKernel::FreeDeconvParam() {
  if (deconv_param_ != nullptr) {
    for (int i = 0; i < deconv_param_->compute_size_; i++) {
      DeConvComputeUnit &unit = deconv_param_->compute_units_[i];

      if (unit.weight_ != nullptr) {
        free(unit.weight_);
        unit.weight_ = nullptr;
      }

      if (unit.use_winograd_) {
        if (unit.winograd_.AT_ != nullptr) {
          free(unit.winograd_.AT_);
          unit.winograd_.AT_ = nullptr;
        }
        if (unit.winograd_.BT_ != nullptr) {
          free(unit.winograd_.BT_);
          unit.winograd_.BT_ = nullptr;
        }
      }
    }
  }
@@ -204,7 +204,7 @@ int MirrorPadImplInt8(void *cdata, int task_id) {
  return RET_OK;
}

-int PadInt8CPUKernel::CheckPaddings(const int *paddings, int length, int *input_shape, int mode) {
+int PadInt8CPUKernel::CheckPaddings(const int *paddings, int length, const int *input_shape, int mode) {
  if (paddings == nullptr || input_shape == nullptr) {
    return RET_NULL_PTR;
  }
@@ -48,7 +48,7 @@ class PadInt8CPUKernel : public LiteKernel {

 private:
  int HandleMirrorPad();
-  int CheckPaddings(const int *paddings, int length, int *input_shape, int mode);
+  int CheckPaddings(const int *paddings, int length, const int *input_shape, int mode);
  int CopyPaddingFromInput();
  void CalculateStrides();
  int ExtendPaddings(int *paddings, int length, const int *ori_paddings, int ori_length);