!21229 [LITE] fix magic numbers

Merge pull request !21229 from yefeng/140-fix_code-master
i-robot 2021-08-02 12:53:02 +00:00 committed by Gitee
commit 5c67b857af
7 changed files with 49 additions and 32 deletions
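
For context, the cleanup this merge request applies is the same in every file: bare tensor indices, shape indices, and loop bounds are hoisted into named constexpr constants in an anonymous namespace, so each access reads as "which input" rather than "which offset". A minimal, self-contained sketch of that before/after shape, using hypothetical constant and variable names that are not taken from the patch:

// Illustrative only: the vector and index names below are invented for this sketch.
#include <cstdio>
#include <vector>

namespace {
// Named indices replace bare literals such as in_tensors_.at(4).
constexpr int kInputXIdx = 0;
constexpr int kInputMeanIdx = 3;
constexpr int kInputVarIdx = 4;
}  // namespace

int main() {
  std::vector<float> in_tensors = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f};
  // Each read now names what it fetches instead of how far it indexes.
  std::printf("x=%.1f mean=%.1f var=%.1f\n", in_tensors.at(kInputXIdx),
              in_tensors.at(kInputMeanIdx), in_tensors.at(kInputVarIdx));
  return 0;
}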

@@ -165,6 +165,5 @@ bool IsSupportSDot() {
 #endif
   return status;
 }
-
 }  // namespace lite
 }  // namespace mindspore

@@ -34,6 +34,16 @@ using mindspore::lite::RET_OK;
 using mindspore::schema::PrimitiveType_BatchNormGrad;
 
 namespace mindspore::kernel {
+namespace {
+constexpr int kNumInputDim_0 = 0;
+constexpr int kNumInputDim_1 = 1;
+constexpr int kNumInputDim_2 = 2;
+constexpr int kNumInputDim_3 = 3;
+constexpr int kNumInputDim_4 = 4;
+constexpr int kNumInputDim_5 = 5;
+constexpr int kNumOutputDim_2 = 2;
+constexpr int kNumJobs = 4;
+}  // namespace
 int BNGradCPUKernelFp16::ReSize() {
   auto *input_x = in_tensors_.at(1);
   int channels = input_x->shape().at(kNHWC_C);
@@ -52,16 +62,16 @@ int BNGradCPUKernelFp16::Init() {
 }
 
 int BNGradCPUKernelFp16::Execute(int task_id) {
-  auto *input_yt = in_tensors_.at(0);
-  auto *input_x = in_tensors_.at(1);
-  auto *input_scale = in_tensors_.at(2);
-  auto *input_mean = in_tensors_.at(3);
-  auto *input_var = in_tensors_.at(4);
+  auto *input_yt = in_tensors_.at(kNumInputDim_0);
+  auto *input_x = in_tensors_.at(kNumInputDim_1);
+  auto *input_scale = in_tensors_.at(kNumInputDim_2);
+  auto *input_mean = in_tensors_.at(kNumInputDim_3);
+  auto *input_var = in_tensors_.at(kNumInputDim_4);
   auto kernel_name = this->name();
   if (kernel_name.find("FusedBatchNormGradCPU") != std::string::npos) {
-    input_mean = in_tensors_.at(4);
-    input_var = in_tensors_.at(5);
+    input_mean = in_tensors_.at(kNumInputDim_4);
+    input_var = in_tensors_.at(kNumInputDim_5);
   }
   auto bn_param = reinterpret_cast<BNGradParameter *>(op_parameter_);
   int stage = stage_;
@@ -71,7 +81,7 @@ int BNGradCPUKernelFp16::Execute(int task_id) {
   auto *output_dx = out_tensors_.at(0);
   auto *output_scale = out_tensors_.at(1);
-  auto *output_bias = out_tensors_.at(2);
+  auto *output_bias = out_tensors_.at(kNumOutputDim_2);
   int32_t batch = input_x->Batch();
   int32_t channels = input_x->Channel();
   int32_t spatial = input_x->Height() * input_x->Width();
@@ -91,7 +101,7 @@ int BNGradCPUKernelFp16::Execute(int task_id) {
   count = (count < 0) ? 0 : count;
   switch (stage) {
     case 0: {
-      for (int job = task_id; job < 4; job += thread_num) {
+      for (int job = task_id; job < kNumJobs; job += thread_num) {
         switch (job) {
           case 0:
             var2InvarFp16(save_var, input_var->ElementsNum(), bn_param->epsilon_);
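
The kNumJobs constant introduced above parameterizes the kernel's work-splitting loop: each thread starts at its own task_id and strides by the total thread count, so the four stage-0 sub-jobs are spread over however many threads the pool provides. A standalone sketch of that striding pattern (the job bodies and the two-thread pool below are placeholders, not MindSpore code):

#include <cstdio>

namespace {
constexpr int kNumJobs = 4;
}  // namespace

// Each call plays the role of one thread's Execute(task_id).
void RunJobsForThread(int task_id, int thread_num) {
  for (int job = task_id; job < kNumJobs; job += thread_num) {
    std::printf("thread %d handles job %d\n", task_id, job);
  }
}

int main() {
  const int thread_num = 2;  // assume a two-thread pool for the demo
  for (int task_id = 0; task_id < thread_num; ++task_id) {
    RunJobsForThread(task_id, thread_num);  // thread 0 -> jobs 0,2; thread 1 -> jobs 1,3
  }
  return 0;
}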

@@ -41,7 +41,6 @@ int DropoutGradCPUKernelFp16::Init() {
     MS_LOG(ERROR) << "unsupported ratio value - Dropout ratio should be between zero to one";
     return RET_ERROR;
   }
-
   if (ratio >= 1.0f) {
     scale_ = 1.0f;
   } else {

@@ -30,6 +30,16 @@ using mindspore::lite::RET_OK;
 using mindspore::schema::PrimitiveType_LayerNormGrad;
 
 namespace mindspore::kernel {
+namespace {
+constexpr int kNumInputDim_0 = 0;
+constexpr int kNumInputDim_1 = 1;
+constexpr int kNumInputDim_2 = 2;
+constexpr int kNumInputDim_3 = 3;
+constexpr int kNumInputDim_4 = 4;
+constexpr int kNumOutputDim_0 = 0;
+constexpr int kNumOutputDim_1 = 1;
+constexpr int kNumOutputDim_2 = 2;
+}  // namespace
 int LayerNormGradCPUKernelFp16::ReSize() { return RET_OK; }
 
 int LayerNormGradCPUKernelFp16::Init() {
@@ -63,14 +73,14 @@ int LayerNormGradCPUKernelFp16::Init() {
 }
 
 int LayerNormGradCPUKernelFp16::Execute(int task_id) {
-  auto input_x = in_tensors_.at(0);
-  auto input_dy = in_tensors_.at(1);
-  auto input_var = in_tensors_.at(2);
-  auto input_mean = in_tensors_.at(3);
-  auto input_gamma = in_tensors_.at(4);
-  auto output_dx = out_tensors_.at(0);
-  auto output_dg = out_tensors_.at(1);
-  auto output_db = out_tensors_.at(2);
+  auto input_x = in_tensors_.at(kNumInputDim_0);
+  auto input_dy = in_tensors_.at(kNumInputDim_1);
+  auto input_var = in_tensors_.at(kNumInputDim_2);
+  auto input_mean = in_tensors_.at(kNumInputDim_3);
+  auto input_gamma = in_tensors_.at(kNumInputDim_4);
+  auto output_dx = out_tensors_.at(kNumOutputDim_0);
+  auto output_dg = out_tensors_.at(kNumOutputDim_1);
+  auto output_db = out_tensors_.at(kNumOutputDim_2);
   float16_t *x = reinterpret_cast<float16_t *>(input_x->data_c());
   float16_t *dy = reinterpret_cast<float16_t *>(input_dy->data_c());

@@ -29,24 +29,23 @@ using mindspore::schema::PrimitiveType_AvgPoolGrad;
 using mindspore::schema::PrimitiveType_MaxPoolGrad;
 
 namespace mindspore::kernel {
+namespace {
+constexpr int kNumInputDim_2 = 2;
+constexpr int kNumShapeDim_2 = 2;
+}  // namespace
 int PoolingGradCPUKernelFp16::ReSize() {
   PoolingParameter *pool_param = reinterpret_cast<PoolingParameter *>(op_parameter_);
   auto in_shape = in_tensors_.at(0)->shape();
   auto out_shape = in_tensors_.at(1)->shape();
   if (pool_param->pool_mode_ == PoolMode_AvgPool) {
-    out_shape = in_tensors_.at(2)->shape();
+    out_shape = in_tensors_.at(kNumInputDim_2)->shape();
   }
   int input_h = in_shape.at(1);
-  int input_w = in_shape.at(2);
+  int input_w = in_shape.at(kNumShapeDim_2);
   if (pool_param->global_) {
     pool_param->window_w_ = input_w;
     pool_param->window_h_ = input_h;
   }
   pool_param->input_h_ = in_shape[kNHWC_H];
   pool_param->input_w_ = in_shape[kNHWC_W];
   pool_param->input_batch_ = in_shape[kNHWC_N];
@@ -55,7 +54,6 @@ int PoolingGradCPUKernelFp16::ReSize() {
   pool_param->output_w_ = out_shape[kNHWC_W];
   pool_param->output_batch_ = out_shape[kNHWC_N];
   pool_param->output_channel_ = out_shape[kNHWC_C];
-
   return RET_OK;
 }
@@ -73,11 +71,11 @@ int PoolingGradCPUKernelFp16::Execute(int task_id) {
   std::fill(output_ptr + task_id * stride * in_batch_size, output_ptr + ((task_id * stride) + count) * in_batch_size,
             0.f);
   if (pool_param->pool_mode_ == PoolMode_MaxPool) {
-    auto dy_ptr = reinterpret_cast<float16_t *>(in_tensors_.at(2)->data_c());
+    auto dy_ptr = reinterpret_cast<float16_t *>(in_tensors_.at(kNumInputDim_2)->data_c());
     MaxPoolingFp16Grad(input_ptr + task_id * stride * in_batch_size, dy_ptr + task_id * stride * out_batch_size,
                        output_ptr + task_id * stride * in_batch_size, count, pool_param);
   } else {
-    input_ptr = reinterpret_cast<float16_t *>(in_tensors_.at(2)->data_c());
+    input_ptr = reinterpret_cast<float16_t *>(in_tensors_.at(kNumInputDim_2)->data_c());
     AvgPoolingFp16Grad(input_ptr + task_id * stride * out_batch_size, output_ptr + task_id * stride * in_batch_size,
                        count, pool_param);
   }

@@ -46,7 +46,6 @@ int ResizeGradCPUKernelFp16::ReSize() {
   param->out_width_ = static_cast<size_t>(out_tensors_.at(0)->Width());
   param->height_scale_ = ScalingFp16(param->out_height_, param->in_height_, align_corners);
   param->width_scale_ = ScalingFp16(param->out_width_, param->in_width_, align_corners);
-
   return RET_OK;
 }
@@ -67,7 +66,6 @@ int ResizeGradCPUKernelFp16::Execute(int task_id) {
   }
   auto batch_size = in_tensors_.at(0)->Batch();
   auto channel = in_tensors_.at(0)->Channel();
-
   if (param->method == static_cast<int>(schema::ResizeMethod_NEAREST)) {
     ResizeNearestNeighborFp16Grad(in_addr, out_addr, batch_size, channel, in_tensors_.at(0)->format(), param);
   } else {

@@ -23,6 +23,9 @@ using mindspore::lite::RET_OK;
 using mindspore::schema::PrimitiveType_FusedBatchNorm;
 
 namespace mindspore::kernel {
+namespace {
+constexpr int kNumInputSize = 5;
+}  // namespace
 int FusedBatchnormCPUKernel::ReSize() {
   FreeMeanAndVariance();
   FreeScaleAndOffset();
@@ -66,7 +69,7 @@ int FusedBatchnormCPUKernel::InitConstTensor() {
 int FusedBatchnormCPUKernel::Run() {
   auto param = reinterpret_cast<BatchNormParameter *>(op_parameter_);
-  if (IsTrain() && IsTrainable() && in_tensors_.size() >= 5) {
+  if (IsTrain() && IsTrainable() && in_tensors_.size() >= kNumInputSize) {
     float *in = static_cast<float *>(in_tensors_[0]->MutableData());
     float *scale = static_cast<float *>(in_tensors_[1]->MutableData());
     float *offset = static_cast<float *>(in_tensors_[2]->MutableData());
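
The last hunk gates the training-only branch on a named minimum input count rather than a bare 5. A self-contained sketch of that guard (the tensor list below is a placeholder, and declaring the constant as size_t is my own choice here so the comparison with .size() stays sign-correct):

#include <cstdio>
#include <vector>

namespace {
constexpr size_t kNumInputSize = 5;  // assumed meaning: x, scale, offset, mean, variance
}  // namespace

int main() {
  std::vector<const char *> in_tensors = {"x", "scale", "offset", "mean", "variance"};
  if (in_tensors.size() >= kNumInputSize) {
    std::printf("training path: %zu inputs present\n", in_tensors.size());
  } else {
    std::printf("inference path: only %zu inputs\n", in_tensors.size());
  }
  return 0;
}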