forked from mindspore-Ecosystem/mindspore
!9259 [MSLITE][Develop] fix code review
From: @sunsuodong Reviewed-by: @zhanghaibo5,@zhang_xue_tong Signed-off-by: @zhang_xue_tong
This commit is contained in: commit 35cc50b34c
@@ -21,7 +21,10 @@
 typedef struct EluParameter {
   OpParameter op_parameter_;
+  // primitive parameter
   float alpha_;
+
+  // shape correlative
   int in_size_;
 } EluParameter;
@@ -21,8 +21,11 @@
 typedef struct EmbeddingLookupParameter {
   OpParameter op_parameter_;
-  bool *is_regulated_;
+  // primitive parameter
   float max_norm_;
+
+  // shape correlative
+  bool *is_regulated_;
   int ids_size_;
   int layer_size_;
   int layer_num_;
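These NNACL parameter structs lean on C-style inheritance: `OpParameter op_parameter_;` is deliberately the first member, so a pointer to `EluParameter` or `EmbeddingLookupParameter` can be handed around as an `OpParameter *` and cast back later. A minimal sketch of the pattern, with a simplified `OpParameter` standing in for the real one (the field sets here are assumptions):

#include <cstdio>

// Simplified stand-ins for the NNACL types; the field sets are assumptions.
typedef struct OpParameter {
  int type_;
} OpParameter;

typedef struct EluParameter {
  OpParameter op_parameter_;  // must stay the first member
  // primitive parameter
  float alpha_;
  // shape correlative
  int in_size_;
} EluParameter;

int main() {
  EluParameter elu = {{42}, 1.0f, 16};
  // A pointer to a struct also points to its first member, so the cast to
  // the "base" type is well defined in both C and C++.
  OpParameter *base = reinterpret_cast<OpParameter *>(&elu);
  printf("type=%d alpha=%.1f in_size=%d\n", base->type_, elu.alpha_, elu.in_size_);
  return 0;
}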
@@ -68,7 +68,7 @@ void ExpFp32(const float *src, float *dst, int num) {
     float decimal = input - integer * param[0];
     int int_exp = (integer + 127) << 23;
     memcpy(dst + i, &int_exp, sizeof(float));
-    float decimal_exp =
+    const float decimal_exp =
       1.0f + decimal * (1.0f + decimal * (0.5f + decimal * (param[3] + decimal * (param[2] + decimal * param[1]))));
     dst[i] *= decimal_exp;
   }
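The `ExpFp32` hunk is part of a fast exponential: x is split as x = n·ln2 + r, 2^n is assembled directly in the IEEE-754 exponent field via `(integer + 127) << 23`, and e^r comes from a short polynomial in the fractional part. A self-contained sketch of the same trick; the coefficients and names are illustrative, not the library's `param` table:

#include <cmath>
#include <cstdio>
#include <cstring>

// Fast e^x: split x = n*ln2 + r, so e^x = 2^n * e^r.
float FastExp(float x) {
  const float ln2 = 0.693147f;
  const int n = static_cast<int>(roundf(x / ln2));
  const float r = x - n * ln2;
  // Assemble 2^n by writing the biased exponent (bias 127) into float bits.
  const int bits = (n + 127) << 23;
  float pow2n;
  memcpy(&pow2n, &bits, sizeof(float));
  // Short Taylor series for e^r; r stays in roughly [-ln2/2, ln2/2].
  const float er =
      1.0f + r * (1.0f + r * (0.5f + r * (1.0f / 6 + r * (1.0f / 24))));
  return pow2n * er;
}

int main() {
  printf("FastExp(1.5f)=%f expf=%f\n", FastExp(1.5f), expf(1.5f));
  return 0;
}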
@@ -70,7 +70,7 @@ void SumAndDiv(const float *src, float *dst, int batch, int channel) {
   }
   int k = 0;
 #ifdef ENABLE_NEON
-  float div = 1.0f / sum;
+  const float div = 1.0f / sum;
   for (; k < channel - C4NUM; k += C4NUM) {
     vst1q_f32(dst + cur_batch_offset + k, vmulq_n_f32(vld1q_f32(src + cur_batch_offset + k), div));
   }
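In `SumAndDiv`, dividing every element by the sum is rewritten as a multiply by a precomputed reciprocal, which the NEON loop (`vmulq_n_f32`) applies four lanes at a time; `k` then carries over into a scalar tail loop for the remainder. A portable sketch of the same loop structure, with illustrative names:

#include <cstdio>

// Normalize `channel` floats by their sum: one reciprocal, then multiplies.
void SumAndDivSketch(const float *src, float *dst, int channel) {
  float sum = 0.0f;
  for (int i = 0; i < channel; ++i) sum += src[i];
  const float div = 1.0f / sum;  // hoisted out of the loop, as in the patch
  int k = 0;
  // On ARM the library uses vld1q_f32/vmulq_n_f32/vst1q_f32 to process 4
  // lanes per iteration; this scalar loop stands in for that vector body.
  for (; k + 4 <= channel; k += 4) {
    for (int lane = 0; lane < 4; ++lane) dst[k + lane] = src[k + lane] * div;
  }
  for (; k < channel; ++k) dst[k] = src[k] * div;  // scalar tail
}

int main() {
  const float src[6] = {1, 2, 3, 4, 5, 5};
  float dst[6];
  SumAndDivSketch(src, dst, 6);
  for (float v : dst) printf("%.2f ", v);  // the outputs sum to 1.00
  printf("\n");
  return 0;
}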
@@ -34,7 +34,7 @@ int DoQuantizeFp32ToInt8(const float *real_values, int8_t *quant_values, float s
     return NNACL_PARAM_INVALID;
   }
 
-  float inverse_scale = 1.0f / scale;
+  const float inverse_scale = 1.0f / scale;
   for (int i = 0; i < size; ++i) {
     int temp = round(real_values[i] * inverse_scale + zp);
     temp = temp < 127 ? temp : 127;
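`DoQuantizeFp32ToInt8` implements affine quantization, q = clamp(round(r / scale + zp), -128, 127), with the per-element division replaced by a multiply by `inverse_scale`. A worked sketch of the formula (the lower clamp is inferred from the int8 range, since the hunk only shows the upper one):

#include <cmath>
#include <cstdint>
#include <cstdio>

// Affine int8 quantization: q = clamp(round(r/scale + zp), -128, 127).
void QuantizeFp32ToInt8(const float *real, int8_t *quant, float scale,
                        int32_t zp, int size) {
  const float inverse_scale = 1.0f / scale;  // multiply is cheaper than divide
  for (int i = 0; i < size; ++i) {
    int temp = static_cast<int>(round(real[i] * inverse_scale + zp));
    temp = temp < 127 ? temp : 127;    // clamp to int8 range, as in the patch
    temp = temp > -128 ? temp : -128;
    quant[i] = static_cast<int8_t>(temp);
  }
}

int main() {
  const float real[3] = {-1.0f, 0.0f, 0.5f};
  int8_t quant[3];
  QuantizeFp32ToInt8(real, quant, /*scale=*/0.01f, /*zp=*/0, 3);
  printf("%d %d %d\n", quant[0], quant[1], quant[2]);  // -100 0 50
  return 0;
}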
@@ -80,9 +80,9 @@ int ResizeBilinearWithFloatScaleInt8(const int8_t *input_ptr, int8_t *output_ptr
     int32_t y_lower_value = quant_arg.y_axis_lower_[ori_out_h];
     int32_t y_upper_value = quant_arg.y_axis_upper_[ori_out_h];
     float weight_x = quant_arg.x_axis_index_[ori_out_w] - x_lower_value;
-    float one_minus_weight_x = 1 - weight_x;
+    const float one_minus_weight_x = 1 - weight_x;
     float weight_y = quant_arg.y_axis_index_[ori_out_h] - y_lower_value;
-    float one_minus_weight_y = 1 - weight_y;
+    const float one_minus_weight_y = 1 - weight_y;
     float left_bottom_coef = one_minus_weight_x * one_minus_weight_y;
     float left_top_coef = weight_y * one_minus_weight_x;
     float right_bottom_coef = weight_x * one_minus_weight_y;
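The resize hunk computes the bilinear blending coefficients from the fractional offsets `weight_x` and `weight_y`; together with the fourth coefficient wx·wy (cut off in the hunk, inferred here as `right_top_coef`) they satisfy (1-wx)(1-wy) + (1-wx)wy + wx(1-wy) + wx·wy = 1. A worked check of that identity:

#include <cstdio>

int main() {
  // Fractional offsets of the output pixel between its four neighbors.
  const float weight_x = 0.3f, weight_y = 0.6f;
  const float one_minus_weight_x = 1 - weight_x;
  const float one_minus_weight_y = 1 - weight_y;
  const float left_bottom_coef = one_minus_weight_x * one_minus_weight_y;
  const float left_top_coef = weight_y * one_minus_weight_x;
  const float right_bottom_coef = weight_x * one_minus_weight_y;
  const float right_top_coef = weight_x * weight_y;  // inferred fourth term
  // The four coefficients form a partition of unity, so interpolating a
  // constant image reproduces the constant exactly.
  printf("sum=%.3f\n", left_bottom_coef + left_top_coef +
                           right_bottom_coef + right_top_coef);  // 1.000
  return 0;
}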
@@ -166,6 +166,5 @@ bool IsSupportFloat16() {
 #endif
   return status;
 }
-
 }  // namespace lite
 }  // namespace mindspore
@@ -13,9 +13,8 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
-#ifndef LITE_MINDSPORE_LITE_C_OPS_OP_STRIDED_SLICE_POPULATE_H
-#define LITE_MINDSPORE_LITE_C_OPS_OP_STRIDED_SLICE_POPULATE_H
+#ifndef MINDSPORE_LITE_SRC_OPS_POPULATE_STRIDED_SLICE_POPULATE_H_
+#define MINDSPORE_LITE_SRC_OPS_POPULATE_STRIDED_SLICE_POPULATE_H_
 
 #include "src/ops/arithmetic.h"
@@ -26,4 +25,4 @@ OpParameter *PopulateStridedSliceParameter(const mindspore::lite::PrimitiveC *pr
 
 }  // namespace lite
 }  // namespace mindspore
-#endif
+#endif  // MINDSPORE_LITE_SRC_OPS_POPULATE_STRIDED_SLICE_POPULATE_H_
@@ -97,7 +97,7 @@ int ConvolutionGradInputCPUKernel::Execute(int task_id) {
   for (j = 0; j < groups; ++j) {
     GemmCb gcb;
     for (int ci = 0; ci < m; ci += chunk) {
-      float *mat_b;
+      float *mat_b = nullptr;
       if (ci == 0) {
         mat_b = w_addr + j * nweights / groups;
         gcb.ca = 0;
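Initializing `mat_b` to `nullptr` addresses a common review finding: a pointer assigned only inside conditional branches reads as potentially uninitialized, and with the default any missed branch fails as a deterministic null dereference instead of reading stack garbage. The shape of the fix in isolation (names illustrative):

#include <cstdio>

float *PickBuffer(bool first_chunk, float *weights, float *workspace) {
  float *mat_b = nullptr;  // deterministic default instead of stack garbage
  if (first_chunk) {
    mat_b = weights;
  } else {
    mat_b = workspace;
  }
  return mat_b;
}

int main() {
  float w = 1.0f, s = 2.0f;
  printf("%.1f %.1f\n", *PickBuffer(true, &w, &s), *PickBuffer(false, &w, &s));
  return 0;
}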
@@ -80,6 +80,9 @@ int ArgMinMaxInt8CPUKernel::Run() {
     case 3:
       Int8ArgMinMaxDim3(input_data, output_data, in_shape.data(), param, &in_quant_arg_, &out_quant_arg_);
       break;
+    default:
+      MS_LOG(ERROR) << "axis is invalid";
+      return RET_ERROR;
   }
   return RET_OK;
 }
@@ -88,13 +91,10 @@ kernel::LiteKernel *CpuArgMinMaxInt8KernelCreator(const std::vector<lite::Tensor
                                                   const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter,
                                                   const lite::InnerContext *ctx, const kernel::KernelKey &desc,
                                                   const mindspore::lite::PrimitiveC *primitive) {
   if (op_parameter == nullptr) {
     MS_LOG(ERROR) << "Input op_parameter is nullptr!";
     return nullptr;
   }
   auto kernel = new (std::nothrow) ArgMinMaxInt8CPUKernel(op_parameter, inputs, outputs, ctx, primitive);
   if (kernel == nullptr) {
     MS_LOG(ERROR) << "new ArgMinMaxInt8CPUKernel fail!";
     free(op_parameter);
     return nullptr;
   }
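The creator hunks that follow all share one ownership contract: the creator takes over `op_parameter`, so every early-return path after the null check must `free` it or the parameter leaks, which is why each failure branch pairs the log with `free(op_parameter)`. A reduced sketch of that contract, with simplified stand-in types:

#include <cstdio>
#include <cstdlib>
#include <new>

struct OpParameter {
  int type_;
};
struct Kernel {
  explicit Kernel(OpParameter *p) : param_(p) {}
  OpParameter *param_;
};

// On any failure the creator must release op_parameter; on success the
// kernel takes it over (a real kernel would free it in its destructor).
Kernel *CreateKernel(OpParameter *op_parameter) {
  if (op_parameter == nullptr) {
    fprintf(stderr, "Input op_parameter is nullptr!\n");
    return nullptr;  // nothing to free yet
  }
  auto *kernel = new (std::nothrow) Kernel(op_parameter);
  if (kernel == nullptr) {
    fprintf(stderr, "new Kernel fail!\n");
    free(op_parameter);  // creator still owns the parameter here
    return nullptr;
  }
  return kernel;
}

int main() {
  auto *param = static_cast<OpParameter *>(malloc(sizeof(OpParameter)));
  Kernel *kernel = CreateKernel(param);
  if (kernel != nullptr) {
    free(kernel->param_);
    delete kernel;
  }
  return 0;
}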
@@ -87,13 +87,10 @@ kernel::LiteKernel *CpuBatchToSpaceInt8KernelCreator(const std::vector<lite::Ten
                                                      OpParameter *op_parameter, const lite::InnerContext *ctx,
                                                      const kernel::KernelKey &desc,
                                                      const mindspore::lite::PrimitiveC *primitive) {
   if (op_parameter == nullptr) {
     MS_LOG(ERROR) << "Input op_parameter is nullptr!";
     return nullptr;
   }
   auto *kernel = new (std::nothrow) BatchToSpaceInt8CPUKernel(op_parameter, inputs, outputs, ctx, primitive);
   if (kernel == nullptr) {
     MS_LOG(ERROR) << "new BatchToSpaceInt8CPUKernel fail!";
     free(op_parameter);
     return nullptr;
   }
@@ -72,16 +72,6 @@ kernel::LiteKernel *CpuBiasAddInt8KernelCreator(const std::vector<lite::Tensor *
                                                 const std::vector<lite::Tensor *> &outputs, OpParameter *parameter,
                                                 const lite::InnerContext *ctx, const KernelKey &desc,
                                                 const mindspore::lite::PrimitiveC *primitive) {
   if (parameter == nullptr) {
     MS_LOG(ERROR) << "parameter is nullptr";
     return nullptr;
   }
   if (ctx == nullptr) {
     MS_LOG(ERROR) << "ctx is nullptr";
     free(parameter);
     return nullptr;
   }
   MS_ASSERT(desc.type == PrimitiveType_BiasAdd);
   auto *kernel = new (std::nothrow) BiasAddInt8CPUKernel(parameter, inputs, outputs, ctx, primitive);
   if (kernel == nullptr) {
     MS_LOG(ERROR) << "Create kernel failed, name: " << parameter->name_;
@@ -145,14 +145,10 @@ kernel::LiteKernel *CpuConcatInt8KernelCreator(const std::vector<lite::Tensor *>
                                                const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
                                                const InnerContext *ctx, const kernel::KernelKey &desc,
                                                const mindspore::lite::PrimitiveC *primitive) {
   if (opParameter == nullptr) {
     MS_LOG(ERROR) << "Input opParameter is nullptr!";
     return nullptr;
   }
   MS_ASSERT(desc.type == schema::PrimitiveType_Concat);
   auto *kernel = new (std::nothrow) ConcatInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
   if (kernel == nullptr) {
     MS_LOG(ERROR) << "new ConcatCPUKernel fail!";
     free(opParameter);
     return nullptr;
   }
   auto ret = kernel->Init();
@@ -115,6 +115,10 @@ int ConvolutionInt8CPUKernel::InitWeightBias() {
   bool filter_peroc = conv_quant_arg_->per_channel_ & FILTER_PER_CHANNEL;
   if (filter_peroc) {
     filter_zp_ptr_ = reinterpret_cast<int32_t *>(malloc(output_channel * sizeof(int32_t)));
+    if (filter_zp_ptr_ == nullptr) {
+      MS_LOG(ERROR) << "Memory allocation failed";
+      return RET_ERROR;
+    }
   }
   for (int oc = 0; oc < output_channel; oc++) {
     int32_t filter_zp = conv_param_->conv_quant_arg_.filter_quant_args_[0].zp_;
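The `InitWeightBias` hunk adds the missing null check after `malloc`, the heart of this review fix: an allocation failure must surface as `RET_ERROR` rather than a later crash when the table is written. The pattern in isolation, with error codes simplified:

#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Allocate a per-channel zero-point table, failing loudly on OOM.
int InitFilterZp(int32_t **filter_zp_ptr, int output_channel) {
  *filter_zp_ptr = static_cast<int32_t *>(malloc(output_channel * sizeof(int32_t)));
  if (*filter_zp_ptr == nullptr) {
    fprintf(stderr, "Memory allocation failed\n");
    return -1;  // stands in for RET_ERROR
  }
  return 0;  // stands in for RET_OK
}

int main() {
  int32_t *zp_table = nullptr;
  if (InitFilterZp(&zp_table, 64) != 0) return 1;
  zp_table[0] = 0;  // safe: the allocation was checked
  free(zp_table);
  return 0;
}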
@@ -92,14 +92,10 @@ kernel::LiteKernel *CpuCropInt8KernelCreator(const std::vector<lite::Tensor *> &
                                              const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
                                              const InnerContext *ctx, const kernel::KernelKey &desc,
                                              const mindspore::lite::PrimitiveC *primitive) {
   if (opParameter == nullptr) {
     MS_LOG(ERROR) << "Input opParameter is nullptr!";
     return nullptr;
   }
   MS_ASSERT(desc.type == schema::PrimitiveType_Crop);
   auto *kernel = new (std::nothrow) CropInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
   if (kernel == nullptr) {
     MS_LOG(ERROR) << "new CropCPUKernel fail!";
     free(opParameter);
     return nullptr;
   }
   auto ret = kernel->Init();
@@ -83,6 +83,7 @@ kernel::LiteKernel *CpuDepthToSpaceInt8KernelCreator(const std::vector<lite::Ten
   auto *kernel = new (std::nothrow) DepthToSpaceInt8CPUKernel(op_parameter, inputs, outputs, ctx, primitive);
   if (kernel == nullptr) {
     MS_LOG(ERROR) << "new BatchToSpaceInt8CPUKernel fail!";
     free(op_parameter);
     return nullptr;
   }
@@ -284,8 +284,9 @@ kernel::LiteKernel *CpuFullConnectionInt8KernelCreator(const std::vector<lite::T
   MS_ASSERT(opParameter != nullptr);
   MS_ASSERT(desc.type == schema::PrimitiveType_FullConnection);
   auto kernel = new (std::nothrow) FullconnectionInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
-  if (!kernel) {
+  if (kernel == nullptr) {
+    MS_LOG(ERROR) << "kernel is nullptr.";
     free(opParameter);
     return nullptr;
   }
   auto ret = kernel->Init();
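The FullConnection hunk replaces `if (!kernel)` with the explicit `if (kernel == nullptr)` and logs the failure. The check matters because `new (std::nothrow)` returns `nullptr` on allocation failure instead of throwing `std::bad_alloc`. In isolation:

#include <cstdio>
#include <new>

struct Kernel {
  int data[16];
};

int main() {
  // nothrow new yields nullptr on failure rather than throwing, so the
  // result must be tested explicitly, as the kernel creators now do.
  auto *kernel = new (std::nothrow) Kernel();
  if (kernel == nullptr) {
    fprintf(stderr, "kernel is nullptr.\n");
    return 1;
  }
  delete kernel;
  return 0;
}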