forked from mindspore-Ecosystem/mindspore
Fix bug of leakyRelu_Int8 operator.
parent 39874d133f
commit 6835d45691
@@ -1,26 +0,0 @@
-/**
- * Copyright 2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "nnacl/fp32/leaky_relu.h"
-
-void DoLeakyRelu(float *input, float *output, LeakyReluParameter *param, int task_id) {
-  for (int i = task_id; i < param->input_num_; i += param->op_parameter_.thread_num_) {
-    if (input[i] <= 0) {
-      output[i] = input[i] * param->slope_[0];
-    } else {
-      output[i] = input[i];
-    }
-  }
-}
@@ -1,30 +0,0 @@
-/**
- * Copyright 2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef MINDSPORE_LITE_NNACL_PRELU_H_
-#define MINDSPORE_LITE_NNACL_PRELU_H_
-
-#include "nnacl/op_base.h"
-#include "nnacl/leaky_relu_parameter.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-void DoLeakyRelu(float *input, float *output, LeakyReluParameter *prelu_param_, int task_id);
-#ifdef __cplusplus
-}
-#endif
-
-#endif  // MINDSPORE_LITE_NNACL_PRELU_H_
@@ -17,8 +17,7 @@
 #include "nnacl/int8/leaky_relu_int8.h"
 #include "nnacl/errorcode.h"
 
-int DoLeakReluInt8(int8_t *inputs, int8_t *output_ptr, LeakyReluQuantArg *quant_prelu_parm, QuantArg *input_quant,
-                   int task_id) {
+int DoLeakReluInt8(int8_t *inputs, int8_t *output_ptr, LeakyReluQuantArg *quant_prelu_parm, int task_id) {
   if (quant_prelu_parm == NULL) {
     return NNACL_NULL_PTR;
   }
@@ -27,17 +26,12 @@ int DoLeakReluInt8(int8_t *inputs, int8_t *output_ptr, LeakyReluQuantArg *quant_
   const float output_inverse_scale = 1.f / output_scale;
-  int output_dim = quant_prelu_parm->input_dim_;
 
-  for (int i = 0; i < output_dim; i++) {
-    input_quant[i].scale_ = quant_prelu_parm->quant_arg.in_args_.scale_;
-    input_quant[i].zp_ = quant_prelu_parm->quant_arg.in_args_.zp_;
-  }
-
-  for (int i = 0; i < output_dim; i++) {
-    float scale = input_quant[i].scale_ * output_inverse_scale;
-    float bias = -input_quant[i].zp_ * scale;
+  float scale = quant_prelu_parm->quant_arg.in_args_.scale_ * output_inverse_scale;
+  float bias = -quant_prelu_parm->quant_arg.in_args_.zp_ * scale;
   for (int j = task_id; j < quant_prelu_parm->element_num; j += quant_prelu_parm->op_parameter_.thread_num_) {
     if (inputs[j] <= 0) {
-      int32_t output_tmp = round(inputs[j] * quant_prelu_parm->slope_[0] * scale + bias) + output_zp;
+      int32_t output_tmp = round(inputs[j] * quant_prelu_parm->slope_ * scale + bias) + output_zp;
       if (output_tmp > 127) {
         output_ptr[j] = 127;
       } else if (output_tmp < -128) {
@@ -57,6 +51,5 @@ int DoLeakReluInt8(int8_t *inputs, int8_t *output_ptr, LeakyReluQuantArg *quant_
       }
     }
   }
-  free(input_quant);
   return NNACL_OK;
 }
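
Note: the two hunks above switch the int8 kernel to a single per-tensor slope and the per-tensor input quant parameters carried in quant_arg, instead of the caller-allocated per-channel input_quant array that used to be freed here. A minimal standalone sketch of the per-element arithmetic this implies (illustrative names; only the negative branch is visible in the diff, so the positive-branch handling here is an assumption):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    // Sketch only (not the nnacl code): requantize one int8 value through leaky ReLU.
    int8_t LeakyReluInt8Element(int8_t q_in, float slope, float in_scale, int32_t in_zp,
                                float out_scale, int32_t out_zp) {
      const float scale = in_scale * (1.f / out_scale);    // scale_ * output_inverse_scale above
      const float bias = -in_zp * scale;                    // folds the input zero point into a bias
      const float multiplier = (q_in <= 0) ? slope : 1.f;   // assumption: positive inputs use slope 1
      int32_t q_out = static_cast<int32_t>(std::round(q_in * multiplier * scale + bias)) + out_zp;
      if (q_out > 127) q_out = 127;                         // saturate to int8, as in the hunk above
      if (q_out < -128) q_out = -128;
      return static_cast<int8_t>(q_out);
    }

    int main() {
      // e.g. slope 0.25 with identical input/output quant params: -40 maps to roughly -10
      std::printf("%d\n", LeakyReluInt8Element(-40, 0.25f, 0.1f, 0, 0.1f, 0));
      return 0;
    }
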
@@ -23,8 +23,7 @@
 #ifdef __cplusplus
 extern "C" {
 #endif
-int DoLeakReluInt8(int8_t *inputs, int8_t *output_ptr, LeakyReluQuantArg *quant_Prelu_parm, QuantArg *input_quant,
-                   int task_id);
+int DoLeakReluInt8(int8_t *inputs, int8_t *output_ptr, LeakyReluQuantArg *quant_Prelu_parm, int task_id);
 #ifdef __cplusplus
 }
 #endif
@@ -1,29 +0,0 @@
-/**
- * Copyright 2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_LITE_NNACL_LEAKY_RELU_PARAMETER_H_
-#define MINDSPORE_LITE_NNACL_LEAKY_RELU_PARAMETER_H_
-
-#include "nnacl/op_base.h"
-
-typedef struct LeakyReluParameter {
-  OpParameter op_parameter_;
-  float *slope_;
-  size_t slope_num_;
-  int input_num_;
-} LeakyReluParameter;
-
-#endif  // MINDSPORE_LITE_NNACL_LEAKY_RELU_PARAMETER_H_
@@ -252,7 +252,7 @@ typedef struct PowerQuantArg {
 typedef struct LeakyReluQuantArg {
   OpParameter op_parameter_;
   PreluQuantArg quant_arg;
-  float *slope_;
+  float slope_;
   int64_t axis_;
   const int *in_shape_;
   const int *out_shape_;
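
Note: with slope_ reduced from a heap-allocated array to a plain float, LeakyReluQuantArg no longer owns memory for the slope, which is why the matching malloc/free pairs disappear in the kernel and test hunks below. A reduced, illustrative sketch (simplified stand-in type, not the library struct):

    #include <cstdio>

    // Stand-in for illustration; the real LeakyReluQuantArg also carries quant args and shapes.
    struct LeakyReluArgSketch {
      float slope_;  // previously `float *slope_` pointing at a single malloc'ed element
    };

    // Applying a scalar slope needs no slope_[0] indexing and no cleanup in a destructor.
    float LeakyRelu(float x, const LeakyReluArgSketch &arg) { return x <= 0.f ? x * arg.slope_ : x; }

    int main() {
      LeakyReluArgSketch arg{0.25f};  // the updated unit test below sets slope_ = 0.25 the same way
      std::printf("%g %g\n", LeakyRelu(-4.f, arg), LeakyRelu(4.f, arg));  // -1 4
      return 0;
    }
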
@@ -170,7 +170,6 @@
 #include "nnacl/fp32/lstm.h"
 #include "nnacl/fp32/embedding_lookup.h"
 #include "nnacl/fp32/elu.h"
-#include "nnacl/leaky_relu_parameter.h"
 #include "mindspore/lite/nnacl/fp32/sparse_to_dense.h"
 #include "nnacl/l2_norm_parameter.h"
 #include "nnacl/detection_post_process_parameter.h"
@@ -253,26 +252,6 @@ OpParameter *PopulatePReLUParameter(const mindspore::lite::PrimitiveC *primitive
   return reinterpret_cast<OpParameter *>(prelu_param);
 }
 
-OpParameter *PopulateLeakyReluParameter(const mindspore::lite::PrimitiveC *primitive) {
-  auto param = dynamic_cast<const mindspore::lite::LeakyReLU *>(primitive);
-  LeakyReluParameter *leaky_relu_param = reinterpret_cast<LeakyReluParameter *>(malloc(sizeof(LeakyReluParameter)));
-  if (leaky_relu_param == nullptr) {
-    MS_LOG(ERROR) << "malloc LeakyReluParameter failed.";
-    return nullptr;
-  }
-  memset(leaky_relu_param, 0, sizeof(LeakyReluParameter));
-  leaky_relu_param->op_parameter_.type_ = primitive->Type();
-  leaky_relu_param->slope_ = reinterpret_cast<float *>(malloc(sizeof(float)));
-  if (leaky_relu_param->slope_ == nullptr) {
-    MS_LOG(ERROR) << "malloc relu slope fail!";
-    free(leaky_relu_param);
-    return nullptr;
-  }
-  leaky_relu_param->slope_[0] = param->GetNegativeSlope();
-  leaky_relu_param->slope_num_ = 1;
-  return reinterpret_cast<OpParameter *>(leaky_relu_param);
-}
-
 OpParameter *PopulatePoolingParameter(const mindspore::lite::PrimitiveC *primitive) {
   auto pooling_primitive =
       reinterpret_cast<mindspore::lite::Pooling *>(const_cast<mindspore::lite::PrimitiveC *>(primitive));
@@ -1701,7 +1680,6 @@ PopulateParameterRegistry::PopulateParameterRegistry() {
   populate_parameter_funcs_[schema::PrimitiveType_Squeeze] = PopulateSqueezeParameter;
   populate_parameter_funcs_[schema::PrimitiveType_Split] = PopulateSplitParameter;
   populate_parameter_funcs_[schema::PrimitiveType_PReLU] = PopulatePReLUParameter;
-  populate_parameter_funcs_[schema::PrimitiveType_LeakyReLU] = PopulateLeakyReluParameter;
   populate_parameter_funcs_[schema::PrimitiveType_PriorBox] = PopulatePriorBoxParameter;
   populate_parameter_funcs_[schema::PrimitiveType_QuantDTypeCast] = PopulateQuantDTypeCastParameter;
   populate_parameter_funcs_[schema::PrimitiveType_Lstm] = PopulateLstmParameter;
@@ -19,7 +19,6 @@
 
 #include <vector>
 #include "src/lite_kernel.h"
-#include "nnacl/leaky_relu_parameter.h"
 #include "src/runtime/kernel/arm/base/layout_transform.h"
 
 using mindspore::lite::InnerContext;
@@ -1,101 +0,0 @@
-/**
- * Copyright 2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "src/runtime/kernel/arm/fp32/leaky_relu.h"
-#include <vector>
-#include "schema/model_generated.h"
-#include "nnacl/fp32/leaky_relu.h"
-#include "src/kernel_registry.h"
-#include "include/errorcode.h"
-#include "src/runtime/runtime_api.h"
-
-using mindspore::kernel::KERNEL_ARCH::kCPU;
-using mindspore::lite::KernelRegistrar;
-using mindspore::lite::RET_ERROR;
-using mindspore::lite::RET_OK;
-using mindspore::schema::PrimitiveType_LeakyReLU;
-
-namespace mindspore::kernel {
-namespace {
-int LeakyReluRun(void *cdata, int task_id) {
-  auto kernel_relu = reinterpret_cast<LeakyReluCPUKernel *>(cdata);
-  auto ret = kernel_relu->DoExcute(task_id);
-  if (ret != RET_OK) {
-    MS_LOG(ERROR) << "LeakyReluRun error task_id[" << task_id << "] error_code[" << ret << "]";
-    return RET_ERROR;
-  }
-  return RET_OK;
-}
-}  // namespace
-
-LeakyReluCPUKernel::~LeakyReluCPUKernel() {
-  if (prelu_param_->slope_ != nullptr) {
-    free(prelu_param_->slope_);
-    prelu_param_->slope_ = nullptr;
-  }
-}
-
-int LeakyReluCPUKernel::Init() { return RET_OK; }
-
-int LeakyReluCPUKernel::DoExcute(int task_id) {
-  DoLeakyRelu(input_data, output_data, prelu_param_, task_id);
-  return RET_OK;
-}
-
-int LeakyReluCPUKernel::Run() {
-  auto prepare_ret = Prepare();
-  if (prepare_ret != RET_OK) {
-    MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret;
-    return prepare_ret;
-  }
-  auto input = in_tensors_.at(0);
-  prelu_param_->input_num_ = input->ElementsNum();
-  input_data = reinterpret_cast<float *>(input->MutableData());
-  output_data = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
-
-  auto ret = ParallelLaunch(this->context_->thread_pool_, LeakyReluRun, this, context_->thread_num_);
-  if (ret != RET_OK) {
-    MS_LOG(ERROR) << "PReluDwRun error: error_code[" << ret << "]";
-    return RET_ERROR;
-  }
-  return RET_OK;
-}
-
-kernel::LiteKernel *CpuLeakyReluFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
-                                                  const std::vector<lite::Tensor *> &outputs, OpParameter *param,
-                                                  const lite::InnerContext *ctx, const kernel::KernelKey &desc,
-                                                  const mindspore::lite::PrimitiveC *primitive) {
-  if (param == nullptr) {
-    MS_LOG(ERROR) << "input param is nullptr!";
-    return nullptr;
-  }
-  MS_ASSERT(desc.type == schema::PrimitiveType_LeakyRelu);
-  auto *kernel = new (std::nothrow) LeakyReluCPUKernel(param, inputs, outputs, ctx, primitive);
-  if (kernel == nullptr) {
-    MS_LOG(ERROR) << "new LeakyReluCPUKernel fail!";
-    return nullptr;
-  }
-  auto ret = kernel->Init();
-  if (ret != RET_OK) {
-    MS_LOG(ERROR) << "Init kernel failed, name: " << param->name_
-                  << ", type: " << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(param->type_));
-    delete kernel;
-    return nullptr;
-  }
-  return kernel;
-}
-
-REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_LeakyReLU, CpuLeakyReluFp32KernelCreator)
-}  // namespace mindspore::kernel
@@ -1,52 +0,0 @@
-/**
- * Copyright 2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_LEAKY_RELU_H_
-#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_LEAKY_RELU_H_
-
-#include <vector>
-#include "src/lite_kernel.h"
-#include "include/context.h"
-#include "nnacl/fp32/leaky_relu.h"
-#include "src/runtime/kernel/arm/base/layout_transform.h"
-
-using mindspore::lite::InnerContext;
-
-namespace mindspore::kernel {
-class LeakyReluCPUKernel : public LiteKernel {
- public:
-  LeakyReluCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
-                     const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
-                     const mindspore::lite::PrimitiveC *primitive)
-      : LiteKernel(parameter, inputs, outputs, ctx, primitive) {
-    prelu_param_ = (reinterpret_cast<LeakyReluParameter *>(op_parameter_));
-    primitive_ = primitive;
-  }
-  ~LeakyReluCPUKernel();
-
-  int Init() override;
-  int ReSize() override { return 0; }
-  int Run() override;
-  int DoExcute(int task_id);
-
- protected:
-  LeakyReluParameter *prelu_param_;
-
- private:
-  float *input_data = nullptr;
-  float *output_data = nullptr;
-};
-}  // namespace mindspore::kernel
-#endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_LEAKY_RELU_H_
@@ -14,10 +14,10 @@
  * limitations under the License.
  */
 
 #include "src/runtime/kernel/arm/fp32/activation.h"
 #include "src/runtime/kernel/arm/int8/relux_int8.h"
 #include "src/runtime/kernel/arm/int8/hswish_int8.h"
 #include "src/runtime/kernel/arm/int8/sigmoid_int8.h"
+#include "src/runtime/kernel/arm/int8/leaky_relu_int8.h"
 #include "schema/model_generated.h"
 #include "src/kernel_registry.h"
 #include "src/runtime/runtime_api.h"
@@ -54,6 +54,9 @@ kernel::LiteKernel *CpuActivationInt8KernelCreator(const std::vector<lite::Tenso
     case schema::ActivationType_SIGMOID:
       kernel = new (std::nothrow) SigmoidInt8CPUKernel(parameter, inputs, outputs, ctx, primitive);
       break;
+    case schema::ActivationType_LEAKY_RELU:
+      kernel = new (std::nothrow) LeakyReluInt8CPUKernel(parameter, inputs, outputs, ctx, primitive);
+      break;
     default:
       break;
   }
@@ -16,6 +16,7 @@
 
 #include "src/runtime/kernel/arm/int8/leaky_relu_int8.h"
 #include <limits>
+#include "nnacl/fp32/activation.h"
 #include "nnacl/int8/leaky_relu_int8.h"
 #include "src/runtime/runtime_api.h"
 #include "src/kernel_registry.h"
@@ -43,15 +44,9 @@ int LeakyReluInt8Run(void *cdata, int task_id) {
 
 int LeakyReluInt8CPUKernel::Init() {
   LeakyReluBaseCPUKernel::Init();
-  LeakyReluParameter *param = reinterpret_cast<LeakyReluParameter *>(op_parameter_);
-  quant_prelu_parm_.slope_ = reinterpret_cast<float *>(malloc(param->slope_num_ * sizeof(float)));
-  if (quant_prelu_parm_.slope_ == nullptr) {
-    MS_LOG(ERROR) << "malloc data fail!";
-    return RET_ERROR;
-  }
-  for (size_t i = 0; i < param->slope_num_; ++i) {
-    quant_prelu_parm_.slope_[i] = param->slope_[i];
-  }
+  quant_prelu_parm_.op_parameter_ = *op_parameter_;
+  quant_prelu_parm_.slope_ = reinterpret_cast<ActivationParameter *>(op_parameter_)->alpha_;
+
   auto *input_tensor = in_tensors_.at(kInputIndex);
   auto in_quant_args = input_tensor->GetQuantParams();
   quant_prelu_parm_.quant_arg.in_args_.scale_ = in_quant_args.front().scale;
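
Note: Init() now copies op_parameter_ into the quant argument and reads the slope straight from the ActivationParameter's alpha_ field, instead of duplicating a LeakyReluParameter slope array. A minimal sketch of that wiring with simplified stand-in types, assuming (as elsewhere in nnacl) that ActivationParameter embeds OpParameter as its first member; names are illustrative:

    #include <cstdio>

    // Simplified stand-ins, not the nnacl definitions.
    struct OpParameterSketch { int type_; };
    struct ActivationParameterSketch {
      OpParameterSketch op_parameter_;  // first member, so the cast below lands on the right object
      float alpha_;                     // the negative slope for LEAKY_RELU
    };
    struct LeakyReluQuantArgSketch {
      OpParameterSketch op_parameter_;
      float slope_;
    };

    void InitSlope(OpParameterSketch *op_parameter, LeakyReluQuantArgSketch *quant_arg) {
      quant_arg->op_parameter_ = *op_parameter;
      quant_arg->slope_ = reinterpret_cast<ActivationParameterSketch *>(op_parameter)->alpha_;
    }

    int main() {
      ActivationParameterSketch act{{0}, 0.25f};
      LeakyReluQuantArgSketch quant{};
      InitSlope(&act.op_parameter_, &quant);
      std::printf("%g\n", quant.slope_);  // 0.25
      return 0;
    }
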
@@ -82,14 +77,6 @@ int LeakyReluInt8CPUKernel::Init() {
 }
 
 LeakyReluInt8CPUKernel::~LeakyReluInt8CPUKernel() {
-  if (quant_prelu_parm_.slope_ != nullptr) {
-    free(quant_prelu_parm_.slope_);
-    quant_prelu_parm_.slope_ = nullptr;
-  }
-  if (input_quant_ != nullptr) {
-    free(input_quant_);
-    input_quant_ = nullptr;
-  }
   if (quant_prelu_parm_.in_shape_ != nullptr) {
     free(const_cast<int *>(quant_prelu_parm_.in_shape_));
     quant_prelu_parm_.in_shape_ = nullptr;
@@ -105,10 +92,6 @@ int LeakyReluInt8CPUKernel::ReSize() {
   auto *out_tensor = out_tensors_.at(kOutputIndex);
   auto input_dim = input_tensor->shape().size();
   MS_ASSERT(input_dim <= CROP_OFFSET_MAX_SIZE);
-  if (input_quant_ != nullptr) {
-    free(input_quant_);
-    input_quant_ = nullptr;
-  }
   quant_prelu_parm_.input_dim_ = input_dim;
   quant_prelu_parm_.element_num = in_tensors_[0]->Size();
   auto input_shape = input_tensor->shape();
@@ -128,11 +111,6 @@ int LeakyReluInt8CPUKernel::ReSize() {
     memcpy(reinterpret_cast<void *>(const_cast<int *>(quant_prelu_parm_.out_shape_)), output_shape.data(),
            sizeof(int) * output_dim);
   }
-  input_quant_ = static_cast<QuantArg *>(malloc(sizeof(QuantArg) * input_dim));
-  if (input_quant_ == nullptr) {
-    MS_LOG(ERROR) << "malloc memory failed";
-    return RET_MEMORY_FAILED;
-  }
   return RET_OK;
 }
 
@@ -154,7 +132,7 @@ int LeakyReluInt8CPUKernel::DoExecute(int task_id) {
   auto out_tensor = out_tensors_.at(kOutputIndex);
   int8_t *input_data = reinterpret_cast<int8_t *>(input_tensor->MutableData());
   int8_t *output_data = reinterpret_cast<int8_t *>(out_tensor->MutableData());
-  auto ret = DoLeakReluInt8(input_data, output_data, &quant_prelu_parm_, input_quant_, task_id);
+  auto ret = DoLeakReluInt8(input_data, output_data, &quant_prelu_parm_, task_id);
   if (ret != NNACL_OK) {
     MS_LOG(ERROR) << "DoLeakReluInt8 failed";
     return RET_ERROR;
@@ -39,7 +39,6 @@ class LeakyReluInt8CPUKernel : public LeakyReluBaseCPUKernel {
 
  private:
   LeakyReluQuantArg quant_prelu_parm_;
-  QuantArg *input_quant_ = nullptr;
 };
 }  // namespace mindspore::kernel
 
@@ -39,11 +39,15 @@ int ReduceInt8CPUKernel::Init() {
   if (ret != RET_OK) {
     return ret;
   }
-  ret = CalculateQuantArgs();
-  if (ret != RET_OK) {
-    return ret;
+  if (!this->in_tensors_[0]->shape().empty()) {
+    this->valid_shape_ = true;
+    ret = CalculateQuantArgs();
+    if (ret != RET_OK) {
+      return ret;
+    }
+  } else {
+    this->valid_shape_ = false;
   }
-
   switch (mode_) {
     case static_cast<int>(ReduceMode_ReduceMean): {
       reducer_ = ReduceMeanInt8;
@@ -248,6 +252,12 @@ int ReduceInt8CPUKernel::Run() {
     MS_LOG(ERROR) << "Prepare fail!ret: " << prepare_ret;
     return prepare_ret;
   }
+  if (!this->valid_shape_) {
+    auto ret = CalculateQuantArgs();
+    if (ret != RET_OK) {
+      return ret;
+    }
+  }
   auto ret = MallocTmpBuffer();
   if (ret != RET_OK) {
     FreeTmpBuffer();
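
Note: the two Reduce hunks above (plus the valid_shape_ member added below) defer CalculateQuantArgs() to Run() whenever the input shape is still unknown at Init() time. A minimal sketch of that pattern with hypothetical names, not the kernel's real interface:

    #include <cstdio>
    #include <utility>
    #include <vector>

    // Hypothetical reduced kernel: compute quant args eagerly only when shapes are already known.
    class ReduceInt8Sketch {
     public:
      explicit ReduceInt8Sketch(std::vector<int> in_shape) : in_shape_(std::move(in_shape)) {}

      int Init() {
        if (!in_shape_.empty()) {
          valid_shape_ = true;
          return CalculateQuantArgs();
        }
        valid_shape_ = false;  // shape only becomes known later (e.g. after shape inference)
        return 0;              // RET_OK
      }

      void Resize(std::vector<int> in_shape) { in_shape_ = std::move(in_shape); }

      int Run() {
        if (!valid_shape_) {
          int ret = CalculateQuantArgs();  // lazily, now that the shape is available
          if (ret != 0) return ret;
        }
        // ... launch the actual reduction here ...
        return 0;
      }

     private:
      int CalculateQuantArgs() {
        std::printf("quant args computed for a %zu-D input\n", in_shape_.size());
        return 0;
      }

      std::vector<int> in_shape_;
      bool valid_shape_ = false;
    };

    int main() {
      ReduceInt8Sketch kernel({});   // shape unknown when the kernel is built
      kernel.Init();                 // nothing computed yet
      kernel.Resize({1, 8, 8, 3});   // shapes become known
      kernel.Run();                  // quant args computed here instead
      return 0;
    }
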
@@ -82,6 +82,7 @@ class ReduceInt8CPUKernel : public ReduceBaseCPUKernel {
   std::vector<int32_t *> data_buffers_;
   const int32_t *src_data_ = nullptr;
   int32_t *dst_data_ = nullptr;
+  bool valid_shape_ = false;
 
   Reducer reducer_ = nullptr;
   LastReducer last_reducer_ = nullptr;
@@ -66,8 +66,7 @@ TEST_F(TestPreluInt8, prelu_1) {
 
   LeakyReluQuantArg op_param;
   op_param.op_parameter_.type_ = schema::PrimitiveType_LeakyReLU;
-  op_param.slope_ = reinterpret_cast<float *>(malloc(sizeof(float)));
-  op_param.slope_[0] = 0.25;
+  op_param.slope_ = 0.25;
 
   lite::InnerContext *ctx = new lite::InnerContext;
   ctx->thread_num_ = 2;