commit 48b0fe0b11
@@ -23,7 +23,7 @@ int L2NormalizationInt8(const int8_t *input_data, int8_t *output_data, const L2N
   const int inner_size = param->shape_[param->shape_num_ - 1];
 
   for (int i = begin; i < end; ++i) {
-    int32_t square_sum = 0.0f;
+    int32_t square_sum = 0;
     for (int j = 0; j < inner_size; ++j) {
       int32_t in = input_data[i * inner_size + j] - quant_param->in_.zp_;
       square_sum += in * in;
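Note on the hunk above: square_sum is an int32_t accumulator in a quantized kernel, so initializing it with the float literal 0.0f only added an implicit float-to-int conversion; the integer literal 0 is the intended initializer. A minimal standalone sketch of the integer accumulation (not code from this commit; the overflow headroom noted in the comment is an observation, not something the diff states):

// With int8 inputs and an int8 zero point, each centred value lies in
// [-255, 255], so each squared term is at most 65025 and an int32_t
// accumulator has room for tens of thousands of terms.
#include <cstdint>

int32_t SquareSumInt8(const int8_t *data, int len, int32_t zp) {
  int32_t square_sum = 0;  // integer literal, no float conversion involved
  for (int j = 0; j < len; ++j) {
    int32_t in = data[j] - zp;
    square_sum += in * in;
  }
  return square_sum;
}

int main() {
  const int8_t v[4] = {1, -2, 3, -4};
  return SquareSumInt8(v, 4, 0) == 30 ? 0 : 1;  // 1 + 4 + 9 + 16
}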
@@ -120,9 +120,9 @@ void PackInputSum16x4PerLayer(const int8_t *src, int32_t *dst, int32_t filter_zp
 #ifdef ENABLE_ARM
   PreSum4x16Int8Pert(src, dst, row4, col16, filter_zp);
 #else
-  for (int r = 0; r < row4; r++) {
+  for (size_t r = 0; r < row4; r++) {
     int32_t tmp_value = 0;
-    for (int c = 0; c < col16; c++) {
+    for (size_t c = 0; c < col16; c++) {
       int r4div = r / C4NUM, r4mod = r % C4NUM, c16div = c / C16NUM, c16mod = c % C16NUM;
       int src_index = r4div * C4NUM * col16 + c16div * C16NUM * C4NUM + r4mod * C16NUM + c16mod;
       tmp_value += src[src_index];
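Note on the hunk above: the loop counters move from int to size_t, presumably because row4 and col16 are unsigned sizes and comparing them against signed counters trips sign-compare warnings. A standalone sketch of the same packed-row walk, assuming C4NUM = 4, C16NUM = 16, and size_t bounds (none of which the truncated hunk header confirms):

// Walks a 16x4-packed int8 buffer and accumulates a per-row sum using the
// same index arithmetic as the hunk above. The original kernel presumably
// also folds filter_zp into dst; that step is omitted here.
#include <cstddef>
#include <cstdint>

enum { C4NUM = 4, C16NUM = 16 };

void RowSumsPacked16x4(const int8_t *src, int32_t *dst, size_t row4, size_t col16) {
  for (size_t r = 0; r < row4; r++) {
    int32_t tmp_value = 0;
    for (size_t c = 0; c < col16; c++) {
      size_t r4div = r / C4NUM, r4mod = r % C4NUM;
      size_t c16div = c / C16NUM, c16mod = c % C16NUM;
      size_t src_index = r4div * C4NUM * col16 + c16div * C16NUM * C4NUM + r4mod * C16NUM + c16mod;
      tmp_value += src[src_index];
    }
    dst[r] = tmp_value;
  }
}

int main() {
  int8_t src[4 * 16];
  for (int i = 0; i < 64; ++i) src[i] = 1;
  int32_t dst[4] = {0};
  RowSumsPacked16x4(src, dst, 4, 16);
  return dst[0] == 16 ? 0 : 1;  // every row sums 16 ones
}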
@@ -88,7 +88,7 @@ int PadInt8CPUKernel::SetQuantParam() {
 int PadInt8CPUKernel::InitPadParam() {
   auto in_dims = in_tensors_.at(0)->shape();
   auto out_dims = out_tensors_.at(0)->shape();
-  int ndims = static_cast<size_t>(in_dims.size());
+  int ndims = static_cast<int>(in_dims.size());
 
   int in[] = {1, 1, 1, 1};
   int out[] = {1, 1, 1, 1};
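Note on the hunk above: size() returns size_t, and the result is stored in an int, so casting it to size_t first still leaves an implicit size_t-to-int narrowing at the assignment. Casting directly to int states the narrowing once and matches the destination type. A minimal standalone sketch of the pattern (hypothetical values, not code from this commit):

#include <vector>

int main() {
  std::vector<int> in_dims = {1, 16, 16, 3};
  // old: int ndims = static_cast<size_t>(in_dims.size());  // still narrows implicitly
  int ndims = static_cast<int>(in_dims.size());              // explicit, matches the target type
  return ndims == 4 ? 0 : 1;
}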