[MSLITE][DEVELOP] add quant arg for tensor api

This commit is contained in:
yangruoqi713 2021-07-27 20:10:01 +08:00
parent 7d39f5f3a8
commit b0ef53b699
65 changed files with 368 additions and 277 deletions

View File

@ -57,6 +57,12 @@ enum OptimizationLevel : uint32_t {
kOptimizationType = 0xFFFFFFFF
};
// Quantization parameters exposed through the public MSTensor API.
// Each field is copied to/from the corresponding lite::LiteQuantParam
// member (bitNum / scale / zeroPoint) by MSTensor::Impl::QuantParams and
// MSTensor::Impl::SetQuantParams.
struct QuantParam {
int bit_num;  // quantization bit width (maps to LiteQuantParam::bitNum)
double scale;  // quantization scale (maps to LiteQuantParam::scale)
int32_t zero_point;  // quantization zero point (maps to LiteQuantParam::zeroPoint)
};
class Allocator;
class MS_API MSTensor {
public:
@ -103,6 +109,8 @@ class MS_API MSTensor {
void SetFormat(mindspore::Format format);
mindspore::Format format() const;
void SetData(void *data);
std::vector<QuantParam> QuantParams() const;
void SetQuantParams(std::vector<QuantParam> quant_args);
const std::shared_ptr<Impl> impl() const { return impl_; }
private:
@ -182,7 +190,6 @@ using Key = struct Key {
};
constexpr char kDecModeAesGcm[] = "AES-GCM";
/// \brief CallBackParam defined input arguments for callBack function.
struct MSCallBackParam {
std::string node_name_; /**< node name argument */

View File

@ -337,7 +337,6 @@ int64_t MSTensor::ElementNum() const {
// element number of scalar is 1
return 1;
}
return std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<int64_t>());
}
@ -361,6 +360,26 @@ bool MSTensor::IsDevice() const {
return impl_->IsDevice();
}
// Stub implementations for a build variant where these MSTensor accessors are
// not supported: every call raises via MS_LOG_EXCEPTION instead of doing work.
// NOTE(review): the non-void stubs (allocator(), format(), QuantParams()) have
// no return statement — this presumes MS_LOG_EXCEPTION never returns (throws);
// confirm against the macro's definition.
void MSTensor::SetShape(const std::vector<int64_t> &shape) { MS_LOG_EXCEPTION << "Invalid implement."; }
void MSTensor::SetDataType(enum DataType data_type) { MS_LOG_EXCEPTION << "Invalid implement."; }
void MSTensor::SetTensorName(const std::string &name) { MS_LOG_EXCEPTION << "Invalid implement."; }
void MSTensor::SetAllocator(std::shared_ptr<Allocator> allocator) { MS_LOG_EXCEPTION << "Invalid implement."; }
std::shared_ptr<Allocator> MSTensor::allocator() const { MS_LOG_EXCEPTION << "Invalid implement."; }
void MSTensor::SetFormat(mindspore::Format format) { MS_LOG_EXCEPTION << "Invalid implement."; }
mindspore::Format MSTensor::format() const { MS_LOG_EXCEPTION << "Invalid implement."; }
void MSTensor::SetData(void *data) { MS_LOG_EXCEPTION << "Invalid implement."; }
// New quant-param accessors added by this commit; stubbed like the rest.
std::vector<QuantParam> MSTensor::QuantParams() const { MS_LOG_EXCEPTION << "Invalid implement."; }
void MSTensor::SetQuantParams(std::vector<QuantParam> quant_params) { MS_LOG_EXCEPTION << "Invalid implement."; }
Buffer::Buffer() : impl_(std::make_shared<Impl>()) {}
Buffer::Buffer(const void *data, size_t data_len) : impl_(std::make_shared<Impl>(data, data_len)) {}
Buffer::~Buffer() = default;

View File

@ -55,6 +55,7 @@ class MSTensor;
namespace lite {
struct DeviceContext;
struct LiteQuantParam;
} // namespace lite
#ifdef NOT_USE_STL

View File

@ -17,11 +17,13 @@
#ifndef MINDSPORE_LITE_INCLUDE_MS_TENSOR_H_
#define MINDSPORE_LITE_INCLUDE_MS_TENSOR_H_
#include <vector>
#include "include/lite_utils.h"
#include "ir/dtype/type_id.h"
namespace mindspore {
enum Format : int64_t;
namespace tensor {
/// \brief MSTensor defined tensor in MindSpore Lite.
class MS_API MSTensor {
@ -117,6 +119,10 @@ class MS_API MSTensor {
/// \brief Set the data of MSTensor.
virtual void set_data(void *data) = 0;
virtual Vector<lite::LiteQuantParam> quant_params() const = 0;
virtual void set_quant_params(Vector<lite::LiteQuantParam>) = 0;
};
} // namespace tensor
} // namespace mindspore

View File

@ -201,8 +201,8 @@ void CodeGraphQuantArgsImplement(std::ofstream &ofs, const std::unique_ptr<Coder
}
Tensor *out_tensor = graph_outputs.at(kOutputIndex);
MS_CHECK_PTR_IF_NULL(out_tensor);
std::vector<QuantArg> in_quant_args = in_tensor->quant_params();
std::vector<QuantArg> out_quant_args = out_tensor->quant_params();
std::vector<LiteQuantParam> in_quant_args = in_tensor->quant_params();
std::vector<LiteQuantParam> out_quant_args = out_tensor->quant_params();
if (in_quant_args.empty() || out_quant_args.empty()) {
MS_LOG(ERROR) << "code model quant args failed";
return;

View File

@ -42,7 +42,7 @@ const char tensor_header[] = R"RAW(
namespace mindspore {
namespace lite {
struct QuantArg {
struct LiteQuantParam {
double scale;
int32_t zeroPoint;
float var_corr{1};
@ -76,13 +76,15 @@ class MTensor : public mindspore::tensor::MSTensor {
void *MutableData() override;
void *data() override { return data_; }
void set_data(void *data) override { data_ = data; }
Vector<LiteQuantParam> quant_params() const override { return this->quant_params_; }
void set_quant_params(const Vector<LiteQuantParam> quant_params) override { this->quant_params_ = quant_params; }
private:
String tensor_name_;
TypeId data_type_;
Vector<int> shape_;
void *data_ = nullptr;
Vector<QuantArg> quant_params_;
Vector<LiteQuantParam> quant_params_;
};
} // namespace lite
} // namespace mindspore

View File

@ -96,7 +96,7 @@ int CoderGraph::ConvertTensors() {
auto quant_params = origin_tensor->quantParams();
if (quant_params != nullptr) {
for (int j = 0; j < static_cast<int>(quant_params->size()); j++) {
QuantArg quant_arg{};
LiteQuantParam quant_arg{};
quant_arg.bitNum = quant_params->Get(j)->numBits();
quant_arg.scale = quant_params->Get(j)->scale();
quant_arg.zeroPoint = quant_params->Get(j)->zeroPoint();

View File

@ -184,7 +184,7 @@ int Conv2DBaseCoder::MallocQuantParam() {
int Conv2DBaseCoder::SetInputTensorQuantParam() {
size_t in_arg_num = conv_quant_arg_->input_arg_num_;
if (in_arg_num == kPerTensor) {
QuantArg input_quant_arg = input_tensor_->quant_params().at(0);
LiteQuantParam input_quant_arg = input_tensor_->quant_params().at(0);
conv_quant_arg_->input_quant_args_[0].zp_ = input_quant_arg.zeroPoint;
conv_quant_arg_->input_quant_args_[0].scale_ = static_cast<float>(input_quant_arg.scale);
return RET_OK;
@ -198,11 +198,11 @@ int Conv2DBaseCoder::SetInputTensorQuantParam() {
int Conv2DBaseCoder::SetFilterTensorQuantParam() {
size_t weight_arg_num = conv_quant_arg_->filter_arg_num_;
if (weight_arg_num == kPerTensor) {
QuantArg weight_quant_arg = filter_tensor_->quant_params().at(0);
LiteQuantParam weight_quant_arg = filter_tensor_->quant_params().at(0);
conv_quant_arg_->filter_quant_args_[0].zp_ = weight_quant_arg.zeroPoint;
conv_quant_arg_->filter_quant_args_[0].scale_ = static_cast<float>(weight_quant_arg.scale);
} else {
std::vector<QuantArg> weight_quant_arg = filter_tensor_->quant_params();
std::vector<LiteQuantParam> weight_quant_arg = filter_tensor_->quant_params();
for (int i = 0; i < static_cast<int>(weight_arg_num); ++i) {
conv_quant_arg_->filter_quant_args_[i].zp_ = weight_quant_arg[i].zeroPoint;
conv_quant_arg_->filter_quant_args_[i].scale_ = static_cast<float>(weight_quant_arg[i].scale);
@ -214,7 +214,7 @@ int Conv2DBaseCoder::SetFilterTensorQuantParam() {
int Conv2DBaseCoder::SetOutputTensorQuantParam() {
size_t out_arg_num = conv_quant_arg_->output_arg_num_;
if (out_arg_num == kPerTensor) {
QuantArg output_quant_arg = output_tensor_->quant_params().at(0);
LiteQuantParam output_quant_arg = output_tensor_->quant_params().at(0);
conv_quant_arg_->output_quant_args_[0].zp_ = output_quant_arg.zeroPoint;
conv_quant_arg_->output_quant_args_[0].scale_ = static_cast<float>(output_quant_arg.scale);
} else {

View File

@ -44,7 +44,7 @@ int DetectionPostProcessBaseCoder::Prepare(CoderContext *const context) {
Tensor *anchor_tensor = input_tensors_.at(kIndexSecond);
MS_CHECK_PTR(anchor_tensor);
if (anchor_tensor->data_type() == kNumberTypeInt8) {
QuantArg quant_param = anchor_tensor->quant_params().at(0);
LiteQuantParam quant_param = anchor_tensor->quant_params().at(0);
auto anchor_int8 = reinterpret_cast<int8_t *>(anchor_tensor->data_c());
MS_CHECK_PTR(anchor_int8);
auto anchor_fp32 = static_cast<float *>(
@ -54,7 +54,7 @@ int DetectionPostProcessBaseCoder::Prepare(CoderContext *const context) {
anchor_tensor->ElementsNum());
params_->anchors_ = anchor_fp32;
} else if (anchor_tensor->data_type() == kNumberTypeUInt8) {
QuantArg quant_param = anchor_tensor->quant_params().front();
LiteQuantParam quant_param = anchor_tensor->quant_params().front();
auto anchor_uint8 = reinterpret_cast<uint8_t *>(anchor_tensor->data_c());
MS_CHECK_PTR(anchor_uint8);
auto anchor_fp32 = static_cast<float *>(

View File

@ -116,8 +116,8 @@ int Conv2DInt8Coder::SetParameters() {
MS_CHECK_TRUE(!input_tensor_->quant_params().empty(), "input quant_params is empty");
MS_CHECK_TRUE(!output_tensor_->quant_params().empty(), "output quant_params is empty");
QuantArg input_quant_arg = input_tensor_->quant_params().at(0);
QuantArg output_quant_arg = output_tensor_->quant_params().at(0);
LiteQuantParam input_quant_arg = input_tensor_->quant_params().at(0);
LiteQuantParam output_quant_arg = output_tensor_->quant_params().at(0);
input_offset_ = -input_quant_arg.zeroPoint;
out_offset_ = output_quant_arg.zeroPoint;

View File

@ -118,8 +118,8 @@ int DWConvInt8Coder::SetParameters() {
stride_y_ = conv_param_->stride_h_;
stride_x_ = conv_param_->stride_w_;
QuantArg input_quant_arg = input_tensor_->quant_params().at(0);
QuantArg output_quant_arg = output_tensor_->quant_params().at(0);
LiteQuantParam input_quant_arg = input_tensor_->quant_params().at(0);
LiteQuantParam output_quant_arg = output_tensor_->quant_params().at(0);
output_x_ = output_tensor_->Width();
output_y_ = output_tensor_->Height();

View File

@ -55,9 +55,9 @@ int FullConnectionInt8Coder::SetParameters() {
MS_CHECK_TRUE(!input_tensor_->quant_params().empty(), "input quant_params is empty");
MS_CHECK_TRUE(!filter_tensor_->quant_params().empty(), "filter quant_params is empty");
MS_CHECK_TRUE(!output_tensor_->quant_params().empty(), "output quant_params is empty");
QuantArg input_quant_arg = input_tensor_->quant_params().at(0);
QuantArg filter_quant_arg = filter_tensor_->quant_params().at(0);
QuantArg output_quant_arg = output_tensor_->quant_params().at(0);
LiteQuantParam input_quant_arg = input_tensor_->quant_params().at(0);
LiteQuantParam filter_quant_arg = filter_tensor_->quant_params().at(0);
LiteQuantParam output_quant_arg = output_tensor_->quant_params().at(0);
double real_multiplier = input_quant_arg.scale * filter_quant_arg.scale / output_quant_arg.scale;
QuantizeMultiplier(real_multiplier, &out_multiplier_, &out_shift_);

View File

@ -93,7 +93,7 @@ int PoolingInt8Coder::SetParameters() {
padding_width_ = pooling_parameter_->pad_l_;
MS_CHECK_TRUE(!output_tensor_->quant_params().empty(), "output quant_params is empty");
QuantArg output_quant_arg = output_tensor_->quant_params().at(0);
LiteQuantParam output_quant_arg = output_tensor_->quant_params().at(0);
CalculateActivationRangeQuantized(pooling_parameter_->act_type_ == ActType_Relu,
pooling_parameter_->act_type_ == ActType_Relu6, output_quant_arg.zeroPoint,
output_quant_arg.scale, &act_min_, &act_max_);

View File

@ -25,8 +25,8 @@ namespace mindspore::lite::micro::cmsis {
int ReshapeInt8Coder::DoCode(CoderContext *const context) {
int elements_num = input_tensor_->ElementsNum();
std::vector<QuantArg> input_quant_args = input_tensor_->quant_params();
std::vector<QuantArg> output_quant_args = output_tensor_->quant_params();
std::vector<LiteQuantParam> input_quant_args = input_tensor_->quant_params();
std::vector<LiteQuantParam> output_quant_args = output_tensor_->quant_params();
MS_CHECK_TRUE(!input_quant_args.empty(), "input quant_params is empty");
MS_CHECK_TRUE(!output_quant_args.empty(), "output quant_params is empty");
// in Int8Reshape, the following values are checked. then it will do a memory copy

View File

@ -25,10 +25,10 @@ int SoftMaxInt8Coder::Prepare(CoderContext *const context) {
SoftmaxBaseCoder::Init();
MS_CHECK_TRUE(!input_tensor_->quant_params().empty(), "input quant_params is empty");
QuantArg in_quant_arg = input_tensor_->quant_params().at(0);
LiteQuantParam in_quant_arg = input_tensor_->quant_params().at(0);
quant_params_.in_quant_args_.zp_ = -in_quant_arg.zeroPoint;
std::vector<QuantArg> out_quant_args = output_tensor_->quant_params();
std::vector<LiteQuantParam> out_quant_args = output_tensor_->quant_params();
MS_CHECK_TRUE(!out_quant_args.empty(), "output quant_params is empty");
quant_params_.out_quant_arg_.scale_ = static_cast<float>(out_quant_args.at(0).scale);
quant_params_.out_quant_arg_.zp_ = out_quant_args.at(0).zeroPoint;

View File

@ -36,10 +36,10 @@ int DetectionPostProcessInt8Coder::MallocInputsBuffer() {
int DetectionPostProcessInt8Coder::GetInputData(CoderContext *const context, Serializer *const code) {
Tensor *boxes = input_tensors_.at(0);
MS_CHECK_PTR(boxes);
lite::QuantArg boxes_quant_param = boxes->quant_params().front();
lite::LiteQuantParam boxes_quant_param = boxes->quant_params().front();
Tensor *scores = input_tensors_.at(1);
MS_CHECK_PTR(scores);
lite::QuantArg scores_quant_param = scores->quant_params().front();
lite::LiteQuantParam scores_quant_param = scores->quant_params().front();
MS_CHECK_TRUE(boxes->data_type() == kNumberTypeInt8, "Input data type error");
MS_CHECK_TRUE(scores->data_type() == kNumberTypeInt8, "Input data type error");

View File

@ -90,7 +90,7 @@ void MatMulBaseInt8Coder::FreeQuantParam() {
}
int MatMulBaseInt8Coder::MallocQuantParam() {
std::vector<QuantArg> weight_quant_params = filter_tensor_->quant_params();
std::vector<LiteQuantParam> weight_quant_params = filter_tensor_->quant_params();
int col = filter_tensor_->shape().front();
filter_per_channel_ = (weight_quant_params.size() > 1);
weight_quant_num_ = filter_per_channel_ ? col : 1;
@ -108,16 +108,16 @@ int MatMulBaseInt8Coder::MallocQuantParam() {
}
int MatMulBaseInt8Coder::InitQuantParam() {
std::vector<QuantArg> in_quant_params = input_tensor_->quant_params();
std::vector<LiteQuantParam> in_quant_params = input_tensor_->quant_params();
MS_CHECK_TRUE(!in_quant_params.empty(), "in_quant_params is empty");
quant_.input_.zp_ = in_quant_params.front().zeroPoint;
quant_.input_.scale_ = static_cast<float>(in_quant_params.front().scale);
std::vector<QuantArg> out_quant_params = output_tensor_->quant_params();
std::vector<LiteQuantParam> out_quant_params = output_tensor_->quant_params();
MS_CHECK_TRUE(!out_quant_params.empty(), "out_quant_params is empty");
quant_.output_.zp_ = out_quant_params.front().zeroPoint;
quant_.output_.scale_ = static_cast<float>(out_quant_params.front().scale);
std::vector<QuantArg> weight_quant_params = filter_tensor_->quant_params();
std::vector<LiteQuantParam> weight_quant_params = filter_tensor_->quant_params();
for (int i = 0; i < weight_quant_num_; i++) {
quant_.filter_zp_[i] = weight_quant_params[i].zeroPoint;
quant_.filter_scale_[i] = static_cast<float>(weight_quant_params[i].scale);

View File

@ -44,8 +44,8 @@ int PoolingInt8Coder::DoCode(CoderContext *const context) {
pooling_parameter->output_w_ = out_tensor->Width();
// get quant params
std::vector<QuantArg> in_quant_args = in_tensor->quant_params();
std::vector<QuantArg> out_quant_args = out_tensor->quant_params();
std::vector<LiteQuantParam> in_quant_args = in_tensor->quant_params();
std::vector<LiteQuantParam> out_quant_args = out_tensor->quant_params();
Collect(context,
{
"nnacl/int8/pooling_int8.h",

View File

@ -24,8 +24,8 @@
using mindspore::schema::PrimitiveType_ReduceFusion;
namespace mindspore::lite::micro::nnacl {
int ReduceInt8Coder::CalculateQuantArgs() {
QuantArg input_quant = input_tensor_->quant_params().at(0);
QuantArg output_quant = output_tensor_->quant_params().at(0);
LiteQuantParam input_quant = input_tensor_->quant_params().at(0);
LiteQuantParam output_quant = output_tensor_->quant_params().at(0);
quant_arg_.in_scale_ = input_quant.scale;
quant_arg_.in_zp_ = input_quant.zeroPoint;
quant_arg_.out_scale_ = output_quant.scale;

View File

@ -30,8 +30,8 @@ int ReshapeInt8Coder::DoCode(CoderContext *const context) {
MS_CHECK_PTR(input);
MS_CHECK_PTR(output);
int elements_num = input->ElementsNum();
std::vector<QuantArg> input_quant_args = input->quant_params();
std::vector<QuantArg> output_quant_args = output->quant_params();
std::vector<LiteQuantParam> input_quant_args = input->quant_params();
std::vector<LiteQuantParam> output_quant_args = output->quant_params();
Collect(context,
{

View File

@ -30,11 +30,11 @@ using mindspore::schema::PrimitiveType_Softmax;
namespace mindspore::lite::micro::nnacl {
int SoftMaxInt8Coder::Prepare(CoderContext *const context) {
SoftmaxBaseCoder::Init();
std::vector<QuantArg> in_quant_args = input_tensor_->quant_params();
std::vector<LiteQuantParam> in_quant_args = input_tensor_->quant_params();
quant_params_.in_quant_args_.scale_ = in_quant_args.at(0).scale;
quant_params_.in_quant_args_.zp_ = -in_quant_args.at(0).zeroPoint;
std::vector<QuantArg> out_quant_args = output_tensor_->quant_params();
std::vector<LiteQuantParam> out_quant_args = output_tensor_->quant_params();
quant_params_.out_quant_arg_.scale_ = out_quant_args.at(0).scale;
quant_params_.out_quant_arg_.zp_ = out_quant_args.at(0).zeroPoint;
quant_params_.output_activation_min_ = std::numeric_limits<int8_t>::min();
@ -82,7 +82,7 @@ int SoftMaxInt8Coder::DoCode(CoderContext *const context) {
NNaclInt8Serializer code;
code.precision(kPrecision);
code.CodeStruct("quant_args", quant_params_);
code.CodeStruct("quant_params", quant_params_);
code.CodeStruct("softmax_parameter", *softmax_param_);
code.CodeFunction("memset", exp_data_, 0, exp_data_size_);
@ -91,7 +91,7 @@ int SoftMaxInt8Coder::DoCode(CoderContext *const context) {
MS_CHECK_TRUE(thread_num_ > 0, "thread_num_ <= 0");
int stride = UP_DIV(outter_size, thread_num_);
int count = MSMIN(stride, outter_size - stride * kDefaultTaskId);
code.CodeFunction("SoftmaxInt8", input_tensor_, output_tensor_, count, exp_data_, sum_data_, "&quant_args",
code.CodeFunction("SoftmaxInt8", input_tensor_, output_tensor_, count, exp_data_, sum_data_, "&quant_params",
"(SoftmaxParameter *)&softmax_parameter");
context->AppendCode(code.str());
return RET_OK;

View File

@ -23,7 +23,7 @@
namespace mindspore {
namespace lite {
struct QuantArg {
struct LiteQuantParam {
double scale;
int32_t zeroPoint;
float var_corr{1};
@ -57,13 +57,15 @@ class MTensor : public mindspore::tensor::MSTensor {
void *MutableData() override;
void *data() override { return data_; }
void set_data(void *data) override { data_ = data; }
Vector<LiteQuantParam> quant_params() const override { return this->quant_params_; }
void set_quant_params(const Vector<LiteQuantParam> quant_params) override { this->quant_params_ = quant_params; }
private:
String tensor_name_;
TypeId data_type_;
Vector<int> shape_;
void *data_ = nullptr;
Vector<QuantArg> quant_params_;
Vector<LiteQuantParam> quant_params_;
};
} // namespace lite
} // namespace mindspore

View File

@ -23,7 +23,7 @@
namespace mindspore {
namespace lite {
struct QuantArg {
struct LiteQuantParam {
double scale;
int32_t zeroPoint;
float var_corr{1};
@ -57,13 +57,15 @@ class MTensor : public mindspore::tensor::MSTensor {
void *MutableData() override;
void *data() override { return data_; }
void set_data(void *data) override { data_ = data; }
Vector<LiteQuantParam> quant_params() const override { return this->quant_params_; }
void set_quant_params(const Vector<LiteQuantParam> quant_params) override { this->quant_params_ = quant_params; }
private:
String tensor_name_;
TypeId data_type_;
Vector<int> shape_;
void *data_ = nullptr;
Vector<QuantArg> quant_params_;
Vector<LiteQuantParam> quant_params_;
};
} // namespace lite
} // namespace mindspore

View File

@ -29,6 +29,7 @@
#include "include/errorcode.h"
#include "include/lite_utils.h"
#include "include/ms_tensor.h"
#include "src/tensor.h"
#include "src/common/log_adapter.h"
namespace mindspore {
@ -195,6 +196,39 @@ class MSTensor::Impl {
lite_tensor_->set_data(data);
}
// Returns the tensor's quantization parameters, converted from the internal
// lite::LiteQuantParam representation to the public QuantParam struct
// (bitNum -> bit_num, scale -> scale, zeroPoint -> zero_point).
// Returns an empty vector (after logging an error) when the underlying
// lite tensor is null.
virtual std::vector<QuantParam> QuantParams() const {
if (lite_tensor_ == nullptr) {
MS_LOG(ERROR) << "Invalid tensor.";
return std::vector<QuantParam>{};
}
auto lite_quant_params = lite_tensor_->quant_params();
std::vector<QuantParam> quant_params;
// Field-by-field copy: public QuantParam and lite::LiteQuantParam are
// distinct types with differently-named members.
for (size_t i = 0; i < lite_quant_params.size(); i++) {
QuantParam param;
param.bit_num = lite_quant_params[i].bitNum;
param.scale = lite_quant_params[i].scale;
param.zero_point = lite_quant_params[i].zeroPoint;
quant_params.push_back(param);
}
return quant_params;
}
// Replaces the tensor's quantization parameters, converting each public
// QuantParam into a lite::LiteQuantParam (the inverse of QuantParams() above)
// and forwarding the list to the underlying lite tensor.
// No-op (after logging an error) when the underlying lite tensor is null.
// NOTE(review): quant_params is taken by value but only read; a const
// reference would avoid a vector copy — signature kept to match the public
// MSTensor::SetQuantParams declaration.
void SetQuantParams(std::vector<QuantParam> quant_params) {
if (lite_tensor_ == nullptr) {
MS_LOG(ERROR) << "Invalid tensor.";
return;
}
std::vector<lite::LiteQuantParam> lite_quant_params;
for (size_t i = 0; i < quant_params.size(); i++) {
lite::LiteQuantParam lite_arg;
lite_arg.bitNum = quant_params[i].bit_num;
lite_arg.scale = quant_params[i].scale;
lite_arg.zeroPoint = quant_params[i].zero_point;
lite_quant_params.push_back(lite_arg);
}
lite_tensor_->set_quant_params(lite_quant_params);
}
virtual bool IsDevice() const { return false; }
tensor::MSTensor *lite_tensor() const { return lite_tensor_; }

View File

@ -25,9 +25,6 @@
#include "include/version.h"
namespace mindspore {
namespace {
constexpr int64_t MAX_MALLOC_SIZE = static_cast<size_t>(2000) * 1024 * 1024;
}
class Buffer::Impl {
public:
Impl() : data_() { MS_LOG(ERROR) << "Unsupported feature."; }
@ -319,6 +316,22 @@ void MSTensor::SetData(void *data) {
return impl_->SetData(data);
}
// Public API: fetch quantization parameters by delegating to the pimpl.
// Returns an empty vector (after logging an error) when the tensor has no
// implementation object.
std::vector<QuantParam> MSTensor::QuantParams() const {
if (impl_ == nullptr) {
MS_LOG(ERROR) << "Invalid tensor implement.";
return std::vector<QuantParam>{};
}
return impl_->QuantParams();
}
// Public API: set quantization parameters by delegating to the pimpl.
// No-op (after logging an error) when the tensor has no implementation object.
void MSTensor::SetQuantParams(std::vector<QuantParam> quant_params) {
if (impl_ == nullptr) {
MS_LOG(ERROR) << "Invalid tensor implement.";
return;
}
// `return` on a void expression is valid but purely stylistic here.
return impl_->SetQuantParams(quant_params);
}
Buffer::Buffer() : impl_(nullptr) { MS_LOG(ERROR) << "Unsupported feature."; }
Buffer::Buffer(const void *data, size_t data_len) : impl_(nullptr) { MS_LOG(ERROR) << "Unsupported feature."; }
Buffer::~Buffer() = default;

View File

@ -107,7 +107,7 @@ void LiteOpActor::IsolateInputData(std::vector<std::shared_ptr<LiteOpActor>> *ac
new_tensor->set_allocator(kernel_->Context()->allocator);
}
new_tensor->set_tensor_name(kernel_->name() + "_duplicate_" + old_tensor->tensor_name());
for (QuantArg quant : old_tensor->quant_params()) {
for (LiteQuantParam quant : old_tensor->quant_params()) {
new_tensor->AddQuantParam(quant);
}
isolate_input_map_.insert(std::make_pair(new_tensor, old_tensor));

View File

@ -94,7 +94,7 @@ void LiteSession::ConvertTensorsQuantParam(const schema::Tensor *src_tensor, lit
auto quant_params = src_tensor->quantParams();
if (quant_params != nullptr) {
for (size_t j = 0; j < quant_params->size(); j++) {
QuantArg quant_arg{};
LiteQuantParam quant_arg{};
quant_arg.bitNum = quant_params->Get(j)->numBits();
quant_arg.scale = quant_params->Get(j)->scale();
quant_arg.zeroPoint = quant_params->Get(j)->zeroPoint();
@ -378,7 +378,7 @@ void LiteSession::IsolateOutputTensor() {
new Tensor(src_tensor->data_type(), src_tensor->shape(), src_tensor->format(), Tensor::GRAPH_OUTPUT);
new_tensor->set_allocator(src_tensor->allocator()); /* GPU use opencl allocator */
new_tensor->set_tensor_name(src_tensor->tensor_name() + "_duplicate");
for (QuantArg quant : src_tensor->quant_params()) {
for (LiteQuantParam quant : src_tensor->quant_params()) {
new_tensor->AddQuantParam(quant);
}
new_tensor->set_init_ref_count(src_tensor->init_ref_count());

View File

@ -46,9 +46,9 @@ int ConcatInt8CPUKernel::Init() {
}
auto output_tensor = out_tensors_.at(kOutputIndex);
auto quant_args = output_tensor->quant_params();
concat_param_->quant_arg_.out_args_.scale_ = quant_args.front().scale;
concat_param_->quant_arg_.out_args_.zp_ = quant_args.front().zeroPoint;
auto quant_params = output_tensor->quant_params();
concat_param_->quant_arg_.out_args_.scale_ = quant_params.front().scale;
concat_param_->quant_arg_.out_args_.zp_ = quant_params.front().zeroPoint;
concat_param_->quant_arg_.output_activation_min_ = std::numeric_limits<int8_t>::min();
concat_param_->quant_arg_.output_activation_max_ = std::numeric_limits<int8_t>::max();

View File

@ -35,7 +35,7 @@ class DetectionPostProcessInt8CPUKernel : public DetectionPostProcessBaseCPUKern
int8_t *data_int8_ = nullptr;
float *data_fp32_ = nullptr;
lite::QuantArg quant_param_;
lite::LiteQuantParam quant_param_;
int quant_size_ = 0;
int thread_n_stride_ = 0;
int DequantizeInt8ToFp32(const int task_id);

View File

@ -61,8 +61,8 @@ int SqueezeInt8CPUKernel::Init() {
MS_ASSERT(this->out_tensors_.size() == 1);
auto output_tensor = out_tensors_.at(0);
MS_ASSERT(output_tensor != nullptr);
auto quant_args = output_tensor->quant_params();
MS_ASSERT(quant_args.size() == 1);
auto quant_params = output_tensor->quant_params();
MS_ASSERT(quant_params.size() == 1);
quant_squeeze_param_->out_quant_args_ = reinterpret_cast<QuantArg *>(malloc(sizeof(QuantArg)));
if (quant_squeeze_param_->in_quant_args_ == nullptr) {
MS_LOG(ERROR) << "malloc QuantArg failed";
@ -76,8 +76,8 @@ int SqueezeInt8CPUKernel::Init() {
}
return RET_ERROR;
}
quant_squeeze_param_->out_quant_args_->scale_ = quant_args.front().scale;
quant_squeeze_param_->out_quant_args_->zp_ = quant_args.front().zeroPoint;
quant_squeeze_param_->out_quant_args_->scale_ = quant_params.front().scale;
quant_squeeze_param_->out_quant_args_->zp_ = quant_params.front().zeroPoint;
if (!InferShapeDone()) {
return RET_OK;
}

View File

@ -29,10 +29,10 @@ using mindspore::schema::PrimitiveType_Unsqueeze;
namespace mindspore::kernel {
int Unsqueezeint8CPUKernel::Init() {
auto *input_tensor = in_tensors_.at(0);
auto quant_args = input_tensor->quant_params();
MS_ASSERT(quant_args.size() == 1);
param_->quant_arg.in_quant_args_.scale_ = quant_args.front().scale;
param_->quant_arg.in_quant_args_.zp_ = quant_args.front().zeroPoint;
auto quant_params = input_tensor->quant_params();
MS_ASSERT(quant_params.size() == 1);
param_->quant_arg.in_quant_args_.scale_ = quant_params.front().scale;
param_->quant_arg.in_quant_args_.zp_ = quant_params.front().zeroPoint;
auto out_quant_args = input_tensor->quant_params();
param_->quant_arg.out_quant_args_.scale_ = out_quant_args.front().scale;

View File

@ -390,9 +390,11 @@ void Tensor::DecRefCount() {
}
}
void Tensor::AddQuantParam(const QuantArg &quant_arg) { this->quant_params_.push_back(quant_arg); }
void Tensor::AddQuantParam(const LiteQuantParam &quant_param) { this->quant_params_.push_back(quant_param); }
std::vector<QuantArg> Tensor::quant_params() const { return this->quant_params_; }
std::vector<LiteQuantParam> Tensor::quant_params() const { return this->quant_params_; }
void Tensor::set_quant_params(const std::vector<LiteQuantParam> quant_params) { this->quant_params_ = quant_params; }
std::vector<float> Tensor::quant_clusters() const { return this->quant_clusters_; }

View File

@ -34,7 +34,7 @@
namespace mindspore {
namespace lite {
struct QuantArg {
struct LiteQuantParam {
double scale;
int32_t zeroPoint;
float var_corr{1};
@ -162,9 +162,11 @@ class Tensor : public mindspore::tensor::MSTensor {
std::string ToString() const;
void AddQuantParam(const QuantArg &quant_arg);
void AddQuantParam(const LiteQuantParam &quant_param);
std::vector<QuantArg> quant_params() const;
std::vector<LiteQuantParam> quant_params() const override;
void set_quant_params(std::vector<LiteQuantParam>) override;
std::vector<float> quant_clusters() const;
@ -242,7 +244,7 @@ class Tensor : public mindspore::tensor::MSTensor {
Category category_;
std::atomic_int ref_count_ = {0};
size_t init_ref_count_ = 0;
std::vector<QuantArg> quant_params_;
std::vector<LiteQuantParam> quant_params_;
std::vector<float> quant_clusters_;
AllocatorPtr allocator_ = nullptr;
Tensor *root_tensor_ = nullptr;

View File

@ -58,7 +58,7 @@ void DetectionPostProcessTestInit(std::vector<lite::Tensor *> *inputs_, std::vec
auto input_anchors_data =
reinterpret_cast<uint8_t *>(mindspore::lite::ReadFile(input_anchors_path.c_str(), &input_anchors_size));
auto *input_anchors = new lite::Tensor;
lite::QuantArg quant_arg;
lite::LiteQuantParam quant_arg;
quant_arg.zeroPoint = 0;
quant_arg.scale = 0.00645306;
input_anchors->AddQuantParam(quant_arg);

View File

@ -39,9 +39,9 @@ TEST_F(TestQuantizedAdd, Add) {
in_tensor1.set_data(input_data1);
out_tensor.set_data(output_data);
const lite::QuantArg quant_in0 = {0.00784314f, 0}; // -1.0--1.0 -> 0--255
const lite::QuantArg quant_in1 = {0.00784314f, 0};
const lite::QuantArg quant_out = {0.00784314f, 0};
const lite::LiteQuantParam quant_in0 = {0.00784314f, 0}; // -1.0--1.0 -> 0--255
const lite::LiteQuantParam quant_in1 = {0.00784314f, 0};
const lite::LiteQuantParam quant_out = {0.00784314f, 0};
in_tensor0.AddQuantParam(quant_in0);
in_tensor1.AddQuantParam(quant_in1);
out_tensor.AddQuantParam(quant_out);

View File

@ -38,10 +38,10 @@ TEST_F(TestArithmeticSelfInt8, floor_quant0_thread2) {
const int output_size = 12;
int8_t output[12];
std::vector<int> output_shape = {2, 3, 2};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 1.0;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 1.0;
output_quant_arg.zeroPoint = 0;
@ -100,10 +100,10 @@ TEST_F(TestArithmeticSelfInt8, floor_quant1_thread2) {
const int output_size = 12;
int8_t output[12];
std::vector<int> output_shape = {2, 3, 2};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 0.8;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 1.5;
output_quant_arg.zeroPoint = 0;
@ -162,10 +162,10 @@ TEST_F(TestArithmeticSelfInt8, round_quant0_thread2) {
const int output_size = 12;
int8_t output[12];
std::vector<int> output_shape = {2, 3, 2};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 1.0;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 1.0;
output_quant_arg.zeroPoint = 0;
@ -224,10 +224,10 @@ TEST_F(TestArithmeticSelfInt8, round_quant1_thread2) {
const int output_size = 12;
int8_t output[12];
std::vector<int> output_shape = {2, 3, 2};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 0.8;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 1.5;
output_quant_arg.zeroPoint = 0;
@ -286,10 +286,10 @@ TEST_F(TestArithmeticSelfInt8, ceil_quant0_thread2) {
const int output_size = 12;
int8_t output[12];
std::vector<int> output_shape = {2, 3, 2};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 1.0;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 1.0;
output_quant_arg.zeroPoint = 0;
@ -348,10 +348,10 @@ TEST_F(TestArithmeticSelfInt8, ceil_quant1_thread2) {
const int output_size = 12;
int8_t output[12];
std::vector<int> output_shape = {2, 3, 2};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 0.8;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 1.5;
output_quant_arg.zeroPoint = 0;
@ -410,10 +410,10 @@ TEST_F(TestArithmeticSelfInt8, abs_quant0_thread0) {
const int output_size = 12;
int8_t output[12];
std::vector<int> output_shape = {2, 3, 2};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 1.0;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 1.0;
output_quant_arg.zeroPoint = 0;
@ -472,10 +472,10 @@ TEST_F(TestArithmeticSelfInt8, abs_quant1_thread2) {
const int output_size = 12;
int8_t output[12];
std::vector<int> output_shape = {2, 3, 2};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 0.8;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 1.5;
output_quant_arg.zeroPoint = 0;
@ -534,10 +534,10 @@ TEST_F(TestArithmeticSelfInt8, sin_quant0_thread2) {
const int output_size = 4;
int8_t output[4];
std::vector<int> output_shape = {2, 2};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 1.0;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 1.0;
output_quant_arg.zeroPoint = 0;
@ -596,10 +596,10 @@ TEST_F(TestArithmeticSelfInt8, cos_quant0_thread2) {
const int output_size = 4;
int8_t output[4];
std::vector<int> output_shape = {2, 2};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 1.0;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 1.0;
output_quant_arg.zeroPoint = 0;
@ -658,10 +658,10 @@ TEST_F(TestArithmeticSelfInt8, log_quant0_thread2) {
const int output_size = 12;
int8_t output[12];
std::vector<int> output_shape = {2, 3, 2};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 1.0;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 1.0;
output_quant_arg.zeroPoint = 0;
@ -720,10 +720,10 @@ TEST_F(TestArithmeticSelfInt8, sqrt_quant0_thread2) {
const int output_size = 12;
int8_t output[12];
std::vector<int> output_shape = {2, 3, 2};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 1.0;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 1.0;
output_quant_arg.zeroPoint = 0;
@ -782,10 +782,10 @@ TEST_F(TestArithmeticSelfInt8, rsqrt_quant0_thread2) {
const int output_size = 12;
int8_t output[12];
std::vector<int> output_shape = {2, 3, 2};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 1.0;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 1.0;
output_quant_arg.zeroPoint = 0;
@ -844,10 +844,10 @@ TEST_F(TestArithmeticSelfInt8, square_quant0_thread2) {
const int output_size = 12;
int8_t output[12];
std::vector<int> output_shape = {2, 3, 2};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 1.0;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 1.0;
output_quant_arg.zeroPoint = 0;
@ -906,10 +906,10 @@ TEST_F(TestArithmeticSelfInt8, square_quant1_thread2) {
const int output_size = 12;
int8_t output[12];
std::vector<int> output_shape = {2, 3, 2};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 0.8;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 1.5;
output_quant_arg.zeroPoint = 0;
@ -968,10 +968,10 @@ TEST_F(TestArithmeticSelfInt8, logical_not_quant0_thread2) {
const int output_size = 12;
int8_t output[12];
std::vector<int> output_shape = {2, 3, 2};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 1.0;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 1.0;
output_quant_arg.zeroPoint = 0;

View File

@ -44,22 +44,22 @@ TEST_F(TestBatchnormInt8, FusedTest) {
std::vector<int> shape = {1, 1, 6, 2};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 0.1;
input_quant_arg.zeroPoint = 1;
lite::QuantArg input_quant_arg_1;
lite::LiteQuantParam input_quant_arg_1;
input_quant_arg_1.scale = 0.5;
input_quant_arg_1.zeroPoint = 2;
lite::QuantArg input_quant_arg_2;
lite::LiteQuantParam input_quant_arg_2;
input_quant_arg_2.scale = 0.02;
input_quant_arg_2.zeroPoint = 3;
lite::QuantArg input_quant_arg_3;
lite::LiteQuantParam input_quant_arg_3;
input_quant_arg_3.scale = 0.5;
input_quant_arg_3.zeroPoint = 15;
lite::QuantArg input_quant_arg_4;
lite::LiteQuantParam input_quant_arg_4;
input_quant_arg_4.scale = 0.25;
input_quant_arg_4.zeroPoint = 1;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 0.8;
output_quant_arg.zeroPoint = 0;
@ -143,16 +143,16 @@ TEST_F(TestBatchnormInt8, BNTest) {
std::vector<int> shape = {1, 1, 6, 2};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 0.1;
input_quant_arg.zeroPoint = 1;
lite::QuantArg input_quant_arg_1;
lite::LiteQuantParam input_quant_arg_1;
input_quant_arg_1.scale = 0.05;
input_quant_arg_1.zeroPoint = 2;
lite::QuantArg input_quant_arg_2;
lite::LiteQuantParam input_quant_arg_2;
input_quant_arg_2.scale = 0.1;
input_quant_arg_2.zeroPoint = -1;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 0.5;
output_quant_arg.zeroPoint = 0;

View File

@ -42,10 +42,10 @@ TEST_F(TestConcatInt8, Concat1_axis0) {
int8_t output[12];
std::vector<int> output_shape = {6, 2};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 1.0;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 1.0;
output_quant_arg.zeroPoint = 0;
@ -116,10 +116,10 @@ TEST_F(TestConcatInt8, Concat1_axis1_thread2) {
int8_t output[16];
std::vector<int> output_shape = {2, 4, 2};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 1.0;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 1.0;
output_quant_arg.zeroPoint = 0;
@ -191,10 +191,10 @@ TEST_F(TestConcatInt8, Concat1_axis1_thread2_quant1) {
int8_t output[16];
std::vector<int> output_shape = {2, 4, 2};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 1.0;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 2.0;
output_quant_arg.zeroPoint = 0;

View File

@ -72,7 +72,7 @@ TEST_F(TestConv1x1Int8, Input1x1PrePack2) {
int Conv1x1Int8TestInit1_perchannel(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_,
ConvParameter *conv_param, int8_t **correct) {
Tensor *in_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 4}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
auto in_quant_arg = new mindspore::lite::QuantArg();
auto in_quant_arg = new mindspore::lite::LiteQuantParam();
in_quant_arg->zeroPoint = -42, in_quant_arg->scale = 0.117647;
in_t->AddQuantParam(*in_quant_arg);
in_t->MallocData();
@ -83,11 +83,11 @@ int Conv1x1Int8TestInit1_perchannel(std::vector<lite::Tensor *> *inputs_, std::v
Tensor *weight_t = new Tensor(kNumberTypeInt8, {3, 1, 1, 4}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
weight_t->MallocData();
auto weight_quant_arg1 = new mindspore::lite::QuantArg();
auto weight_quant_arg1 = new mindspore::lite::LiteQuantParam();
weight_quant_arg1->zeroPoint = 66, weight_quant_arg1->scale = 0.96439215686275;
auto weight_quant_arg2 = new mindspore::lite::QuantArg();
auto weight_quant_arg2 = new mindspore::lite::LiteQuantParam();
weight_quant_arg2->zeroPoint = 33, weight_quant_arg2->scale = 0.76439215686275;
auto weight_quant_arg3 = new mindspore::lite::QuantArg();
auto weight_quant_arg3 = new mindspore::lite::LiteQuantParam();
weight_quant_arg3->zeroPoint = -20, weight_quant_arg3->scale = 0.99117647;
weight_t->AddQuantParam(*weight_quant_arg1);
weight_t->AddQuantParam(*weight_quant_arg2);
@ -98,7 +98,7 @@ int Conv1x1Int8TestInit1_perchannel(std::vector<lite::Tensor *> *inputs_, std::v
Tensor *out_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 3}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
out_t->MallocData();
auto output_quant_arg = new mindspore::lite::QuantArg();
auto output_quant_arg = new mindspore::lite::LiteQuantParam();
output_quant_arg->zeroPoint = 7, output_quant_arg->scale = 0.294321233;
out_t->AddQuantParam(*output_quant_arg);
outputs_->push_back(out_t);
@ -141,7 +141,7 @@ TEST_F(TestConv1x1Int8, Conv1x1TestPerChannel) {
int Conv1x1Int8TestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_,
ConvParameter *conv_param, int8_t **correct) {
Tensor *in_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 4}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
auto in_quant_arg = new mindspore::lite::QuantArg();
auto in_quant_arg = new mindspore::lite::LiteQuantParam();
in_quant_arg->zeroPoint = -42, in_quant_arg->scale = 0.117647;
in_t->AddQuantParam(*in_quant_arg);
in_t->MallocData();
@ -153,7 +153,7 @@ int Conv1x1Int8TestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite:
inputs_->push_back(in_t);
Tensor *weight_t = new Tensor(kNumberTypeInt8, {3, 1, 1, 4}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
auto weight_quant_arg = new mindspore::lite::QuantArg();
auto weight_quant_arg = new mindspore::lite::LiteQuantParam();
weight_quant_arg->zeroPoint = 66, weight_quant_arg->scale = 0.036439215686275;
weight_t->AddQuantParam(*weight_quant_arg);
weight_t->MallocData();
@ -165,7 +165,7 @@ int Conv1x1Int8TestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite:
Tensor *out_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 3}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
out_t->MallocData();
auto output_quant_arg = new mindspore::lite::QuantArg();
auto output_quant_arg = new mindspore::lite::LiteQuantParam();
output_quant_arg->zeroPoint = 7, output_quant_arg->scale = 0.234321233;
out_t->AddQuantParam(*output_quant_arg);
outputs_->push_back(out_t);
@ -211,7 +211,7 @@ int Conv1x1Int8TestInit2(std::vector<lite::Tensor *> *inputs_, std::vector<lite:
ConvParameter *conv_param, int8_t **correct) {
size_t buffer_size;
Tensor *in_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 4}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
auto in_quant_arg = new mindspore::lite::QuantArg();
auto in_quant_arg = new mindspore::lite::LiteQuantParam();
in_quant_arg->zeroPoint = -42, in_quant_arg->scale = 0.117647;
in_t->AddQuantParam(*in_quant_arg);
in_t->MallocData();
@ -222,7 +222,7 @@ int Conv1x1Int8TestInit2(std::vector<lite::Tensor *> *inputs_, std::vector<lite:
delete[] input;
Tensor *weight_t = new Tensor(kNumberTypeInt8, {3, 1, 1, 4}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
auto weight_quant_arg = new mindspore::lite::QuantArg();
auto weight_quant_arg = new mindspore::lite::LiteQuantParam();
weight_quant_arg->zeroPoint = 66, weight_quant_arg->scale = 0.036439215686275;
weight_t->AddQuantParam(*weight_quant_arg);
weight_t->MallocData();
@ -242,7 +242,7 @@ int Conv1x1Int8TestInit2(std::vector<lite::Tensor *> *inputs_, std::vector<lite:
Tensor *out_t = new Tensor(kNumberTypeInt8, {1, 2, 3, 3}, mindspore::NHWC, lite::Tensor::Category::CONST_TENSOR);
out_t->MallocData();
auto output_quant_arg = new mindspore::lite::QuantArg();
auto output_quant_arg = new mindspore::lite::LiteQuantParam();
output_quant_arg->zeroPoint = 7, output_quant_arg->scale = 0.234321233;
out_t->AddQuantParam(*output_quant_arg);
outputs_->push_back(out_t);

View File

@ -39,10 +39,10 @@ TEST_F(TestCropInt8, crop_1d_axis0_offset0_quant0_thread2) {
const int output_size = 7;
int8_t output[7];
std::vector<int> output_shape = {7};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 1.0;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 1.0;
output_quant_arg.zeroPoint = 0;
@ -105,10 +105,10 @@ TEST_F(TestCropInt8, crop_2d_axis1_offset0_quant0_thread2) {
const int output_size = 14;
int8_t output[14];
std::vector<int> output_shape = {2, 7};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 1.0;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 1.0;
output_quant_arg.zeroPoint = 0;
@ -171,10 +171,10 @@ TEST_F(TestCropInt8, crop_3d_axis1_offset0_quant0_thread0) {
const int output_size = 2;
int8_t output[2];
std::vector<int> output_shape = {2, 1, 1};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 1.0;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 1.0;
output_quant_arg.zeroPoint = 0;
@ -238,10 +238,10 @@ TEST_F(TestCropInt8, crop_3d_axis1_offset0_quant0_thread2) {
const int output_size = 14;
int8_t output[14];
std::vector<int> output_shape = {2, 7, 1};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 1.0;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 1.0;
output_quant_arg.zeroPoint = 0;
@ -304,10 +304,10 @@ TEST_F(TestCropInt8, crop_4d_axis0_offset0_quant0_thread0) {
const int output_size = 1;
int8_t output[1];
std::vector<int> output_shape = {1, 1, 1, 1};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 1.0;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 1.0;
output_quant_arg.zeroPoint = 0;
@ -370,10 +370,10 @@ TEST_F(TestCropInt8, crop_4d_axis1_offset0_quant0_thread0) {
const int output_size = 2;
int8_t output[2];
std::vector<int> output_shape = {2, 1, 1, 1};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 1.0;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 1.0;
output_quant_arg.zeroPoint = 0;
@ -436,10 +436,10 @@ TEST_F(TestCropInt8, crop_4d_axis1_offset1_quant0_thread0) {
const int output_size = 4;
int8_t output[4];
std::vector<int> output_shape = {1, 1, 2, 2};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 1.0;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 1.0;
output_quant_arg.zeroPoint = 0;
@ -505,10 +505,10 @@ TEST_F(TestCropInt8, crop_4d_axis1_offset1_quant1_thread0) {
const int output_size = 4;
int8_t output[4];
std::vector<int> output_shape = {1, 1, 2, 2};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 1.0;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 2.0;
output_quant_arg.zeroPoint = 0;
@ -576,10 +576,10 @@ TEST_F(TestCropInt8, crop_4d_axis0_offset0_quant0_thread2) {
const int output_size = 7;
int8_t output[7];
std::vector<int> output_shape = {1, 7, 1, 1};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 1.0;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 1.0;
output_quant_arg.zeroPoint = 0;
@ -644,10 +644,10 @@ TEST_F(TestCropInt8, crop_4d_axis0_offset0_quant0_thread3) {
const int output_size = 7;
int8_t output[7];
std::vector<int> output_shape = {1, 7, 1, 1};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 1.0;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 1.0;
output_quant_arg.zeroPoint = 0;

View File

@ -28,7 +28,7 @@
using mindspore::lite::DeviceType;
namespace mindspore {
using mindspore::lite::QuantArg;
using mindspore::lite::LiteQuantParam;
using mindspore::lite::Tensor;
class TestDeconvInt8 : public mindspore::CommonTest {
public:
@ -277,7 +277,7 @@ int DeConvInt8TestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite::
in_t->MallocData();
int8_t in[] = {6, 43, 38, 24, -8, 12, 41, -24, -20, 41, -19, -6, -26, -6, 23, -31, 34, 45, 8, 45, -39, -27, -48, 12};
memcpy(in_t->MutableData(), in, sizeof(int8_t) * in_t->ElementsNum());
auto *in_quant_arg = new QuantArg();
auto *in_quant_arg = new LiteQuantParam();
in_quant_arg->zeroPoint = -19, in_quant_arg->scale = 0.31228156;
in_t->AddQuantParam(*in_quant_arg);
inputs_->push_back(in_t);
@ -288,14 +288,14 @@ int DeConvInt8TestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite::
64, 76, 92, 80, 90, 87, 106, 55, 105, 60, 75, 53, 81, 81, 98, 81, 86, 59,
74, 82, 97, 105, 71, 67, 79, 87, 72, 79, 80, 76, 96, 80, 83, 71, 61, 79};
memcpy(weight_t->MutableData(), weight, sizeof(int8_t) * weight_t->ElementsNum());
auto *w_quant_arg = new QuantArg();
auto *w_quant_arg = new LiteQuantParam();
w_quant_arg->zeroPoint = 83, w_quant_arg->scale = 0.023649725490196;
weight_t->AddQuantParam(*w_quant_arg);
inputs_->push_back(weight_t);
auto *out_t = new Tensor(kNumberTypeInt8, {1, 7, 3, 2}, mindspore::NHWC, lite::Tensor::Category::VAR);
out_t->MallocData();
auto *out_quant_arg = new QuantArg();
auto *out_quant_arg = new LiteQuantParam();
out_quant_arg->zeroPoint = 31, out_quant_arg->scale = 0.3439215686275;
out_t->AddQuantParam(*out_quant_arg);
outputs_->push_back(out_t);

View File

@ -40,13 +40,13 @@ TEST_F(TestGatherNdInt8, GatherNdTest) {
std::vector<int> shape = {1, 2, 2, 5};
std::vector<int> out_shape = {1, 3, 5};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 0.5;
input_quant_arg.zeroPoint = 1;
lite::QuantArg input_quant_arg_1;
lite::LiteQuantParam input_quant_arg_1;
input_quant_arg_1.scale = 0.5;
input_quant_arg_1.zeroPoint = 2;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 1;
output_quant_arg.zeroPoint = 0;

View File

@ -38,13 +38,13 @@ TEST_F(TestGatherInt8, GatherTest) {
op_param.axis_ = 0;
std::vector<int> shape = {2, 1, 3, 2};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 0.1;
input_quant_arg.zeroPoint = 1;
lite::QuantArg input_quant_arg_1;
lite::LiteQuantParam input_quant_arg_1;
input_quant_arg_1.scale = 0.5;
input_quant_arg_1.zeroPoint = 2;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 0.1;
output_quant_arg.zeroPoint = 1;

View File

@ -39,8 +39,8 @@ TEST_F(TestHSwishInt8, HSwish) {
in_tensor.set_data(input_data);
out_tensor.set_data(output_data);
const lite::QuantArg quant_in = {0.0431373f, -35}; // -4.0 -- 7.0
const lite::QuantArg quant_out = {0.0392157f, -52}; // -3.0 -- 7.0
const lite::LiteQuantParam quant_in = {0.0431373f, -35}; // -4.0 -- 7.0
const lite::LiteQuantParam quant_out = {0.0392157f, -52}; // -3.0 -- 7.0
in_tensor.AddQuantParam(quant_in);
out_tensor.AddQuantParam(quant_out);

View File

@ -37,8 +37,8 @@ TEST_F(TestL2NormInt8, norm) {
in_tensor.set_data(input_data);
out_tensor.set_data(output_data);
const lite::QuantArg quant_in = {0.0470588244497776f, 0};
const lite::QuantArg quant_out = {0.0078125f, 0};
const lite::LiteQuantParam quant_in = {0.0470588244497776f, 0};
const lite::LiteQuantParam quant_out = {0.0078125f, 0};
in_tensor.AddQuantParam(quant_in);
out_tensor.AddQuantParam(quant_out);
@ -83,8 +83,8 @@ TEST_F(TestL2NormInt8, norm2) {
in_tensor.set_data(input_data);
out_tensor.set_data(output_data);
const lite::QuantArg quant_in = {0.0470588244f, 0};
const lite::QuantArg quant_out = {0.0078125f, 0};
const lite::LiteQuantParam quant_in = {0.0470588244f, 0};
const lite::LiteQuantParam quant_out = {0.0078125f, 0};
in_tensor.AddQuantParam(quant_in);
out_tensor.AddQuantParam(quant_out);

View File

@ -53,7 +53,7 @@ lite::Tensor *MakeQuantTensor(int8_t *data, int len, std::vector<int> *shape, fl
auto tensor_ptr = reinterpret_cast<int8_t *>(tensor->MutableData());
memcpy(tensor_ptr, data, len * sizeof(int8_t));
}
auto quant_arg = new mindspore::lite::QuantArg();
auto quant_arg = new mindspore::lite::LiteQuantParam();
quant_arg->zeroPoint = zp;
quant_arg->scale = scale;
tensor->AddQuantParam(*quant_arg);

View File

@ -42,10 +42,10 @@ TEST_F(TestMulInt8, Mul_quant0) {
int8_t output[12];
std::vector<int> output_shape = {2, 3, 2};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 1.0;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 1.0;
output_quant_arg.zeroPoint = 0;
@ -115,10 +115,10 @@ TEST_F(TestMulInt8, Mul_quant0_thread0) {
int8_t output[18];
std::vector<int> output_shape = {2, 3, 3};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 1.0;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 1.0;
output_quant_arg.zeroPoint = 0;
@ -188,10 +188,10 @@ TEST_F(TestMulInt8, Mul_quant1) {
int8_t output[12];
std::vector<int> output_shape = {2, 3, 2};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 1.0;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 2.0;
output_quant_arg.zeroPoint = 0;
@ -261,10 +261,10 @@ TEST_F(TestMulInt8, Mul_quant1_thread1) {
int8_t output[12];
std::vector<int> output_shape = {2, 3, 2};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 1.0;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 2.0;
output_quant_arg.zeroPoint = 0;
@ -334,10 +334,10 @@ TEST_F(TestMulInt8, test) {
int8_t output[12];
std::vector<int> output_shape = {2, 2, 3};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 1.0;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 1.0;
output_quant_arg.zeroPoint = 0;

View File

@ -24,7 +24,7 @@
#include "src/runtime/kernel/arm/int8/pad_int8.h"
namespace mindspore {
using mindspore::lite::QuantArg;
using mindspore::lite::LiteQuantParam;
using mindspore::lite::Tensor;
class TestPadInt8 : public mindspore::CommonTest {
public:
@ -37,14 +37,14 @@ int PadInt8TestInit1(std::vector<Tensor *> *inputs_, std::vector<Tensor *> *outp
in_t->MallocData();
int8_t in[] = {1, 1, 1};
memcpy(in_t->MutableData(), in, sizeof(int8_t) * in_t->ElementsNum());
QuantArg *in_quant_arg = new QuantArg();
LiteQuantParam *in_quant_arg = new LiteQuantParam();
in_quant_arg->zeroPoint = 10, in_quant_arg->scale = 0.31228156;
in_t->AddQuantParam(*in_quant_arg);
inputs_->push_back(in_t);
Tensor *out_t = new Tensor(kNumberTypeInt8, {7}, mindspore::NHWC, lite::Tensor::CONST_TENSOR);
out_t->MallocData();
QuantArg *out_quant_arg = new QuantArg();
LiteQuantParam *out_quant_arg = new LiteQuantParam();
out_quant_arg->zeroPoint = 10, out_quant_arg->scale = 0.31228156;
out_t->AddQuantParam(*out_quant_arg);
outputs_->push_back(out_t);
@ -88,14 +88,14 @@ int PadInt8TestInit2(std::vector<Tensor *> *inputs_, std::vector<Tensor *> *outp
in_t->MallocData();
int8_t in[] = {18, 71, 99, -6, 5, -119, 86, 13, 15, -85, -41, -77};
memcpy(in_t->MutableData(), in, sizeof(int8_t) * in_t->ElementsNum());
QuantArg *in_quant_arg = new QuantArg();
LiteQuantParam *in_quant_arg = new LiteQuantParam();
in_quant_arg->zeroPoint = 10, in_quant_arg->scale = 0.31228156;
in_t->AddQuantParam(*in_quant_arg);
inputs_->push_back(in_t);
Tensor *out_t = new Tensor(kNumberTypeInt8, {10, 5}, mindspore::NHWC, lite::Tensor::VAR);
out_t->MallocData();
QuantArg *out_quant_arg = new QuantArg();
LiteQuantParam *out_quant_arg = new LiteQuantParam();
out_quant_arg->zeroPoint = 10, out_quant_arg->scale = 0.31228156;
out_t->AddQuantParam(*out_quant_arg);
outputs_->push_back(out_t);
@ -141,14 +141,14 @@ int PadInt8TestInit4(std::vector<Tensor *> *inputs_, std::vector<Tensor *> *outp
in_t->MallocData();
int8_t in[] = {73, 24, 7, -31, -109, -2, 69, -64, 51, -45, 38, 53};
memcpy(in_t->MutableData(), in, sizeof(int8_t) * in_t->ElementsNum());
QuantArg *in_quant_arg = new QuantArg();
LiteQuantParam *in_quant_arg = new LiteQuantParam();
in_quant_arg->zeroPoint = 10, in_quant_arg->scale = 0.31228156;
in_t->AddQuantParam(*in_quant_arg);
inputs_->push_back(in_t);
Tensor *out_t = new Tensor(kNumberTypeInt8, {6, 6, 4, 3}, mindspore::NHWC, lite::Tensor::VAR);
out_t->MallocData();
QuantArg *out_quant_arg = new QuantArg();
LiteQuantParam *out_quant_arg = new LiteQuantParam();
out_quant_arg->zeroPoint = 10, out_quant_arg->scale = 0.31228156;
out_t->AddQuantParam(*out_quant_arg);
outputs_->push_back(out_t);

View File

@ -39,10 +39,10 @@ TEST_F(TestPowerInt8, PowerInt8) {
op_param.scale_ = 1;
op_param.shift_ = 0;
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 0.0156863;
input_quant_arg.zeroPoint = -128;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 0.0627451;
output_quant_arg.zeroPoint = -128;
@ -96,15 +96,15 @@ TEST_F(TestPowerInt8, normal) {
op_param.scale_ = 1;
op_param.shift_ = 0;
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 0.0156863;
input_quant_arg.zeroPoint = -128;
lite::QuantArg exp_quant_arg;
lite::LiteQuantParam exp_quant_arg;
exp_quant_arg.scale = 0.0156863;
exp_quant_arg.zeroPoint = -128;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 0.0352941;
output_quant_arg.zeroPoint = -128;

View File

@ -39,10 +39,10 @@ TEST_F(TestPreluInt8, prelu_1) {
const int output_size = 8;
int8_t output[8];
std::vector<int> output_shape = {8};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 1.0;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 1.0;
output_quant_arg.zeroPoint = 0;

View File

@ -31,7 +31,7 @@ class QuantDTypeCastTestFp32 : public mindspore::CommonTest {
};
TEST_F(QuantDTypeCastTestFp32, QuantDTypeCastTest1) {
const lite::QuantArg quant_arg{0.21176, 5};
const lite::LiteQuantParam quant_arg{0.21176, 5};
QuantDTypeCastParameter param;
param.srcT = kNumberTypeInt8;
param.dstT = kNumberTypeFloat32;
@ -83,7 +83,7 @@ TEST_F(QuantDTypeCastTestFp32, QuantDTypeCastTest1) {
}
TEST_F(QuantDTypeCastTestFp32, QuantDTypeCastTest2) {
const lite::QuantArg quant_arg = {0.3515625, -57};
const lite::LiteQuantParam quant_arg = {0.3515625, -57};
QuantDTypeCastParameter param;
param.op_parameter_.type_ = schema::PrimitiveType_QuantDTypeCast;
param.dstT = kNumberTypeInt8;

View File

@ -22,7 +22,7 @@
#include "nnacl/fp32/reduce_fp32.h"
namespace mindspore {
using mindspore::lite::QuantArg;
using mindspore::lite::LiteQuantParam;
using mindspore::lite::Tensor;
using mindspore::schema::ReduceMode;
using mindspore::schema::ReduceMode_ReduceMax;
@ -51,8 +51,8 @@ class TestReduceInt8 : public mindspore::CommonTest {
kernel::KernelCreator creator_ = nullptr;
lite::InnerContext ctx_ = lite::InnerContext();
kernel::InnerKernel *kernel_ = nullptr;
const QuantArg quant_in_ = {0.005f, 5};
const QuantArg quant_out_ = {0.01f, 1};
const LiteQuantParam quant_in_ = {0.005f, 5};
const LiteQuantParam quant_out_ = {0.01f, 1};
float err_tol_ = 0.05;
};

View File

@ -37,8 +37,8 @@ TEST_F(TestReluXInt8, Relu) {
in_tensor.set_data(input_data);
out_tensor.set_data(output_data);
const lite::QuantArg quant_in = {0.00784314f, 0}; // -1.0--1.0 ->
const lite::QuantArg quant_out = {0.00784314f, 0};
const lite::LiteQuantParam quant_in = {0.00784314f, 0}; // -1.0--1.0 ->
const lite::LiteQuantParam quant_out = {0.00784314f, 0};
in_tensor.AddQuantParam(quant_in);
out_tensor.AddQuantParam(quant_out);
@ -83,8 +83,8 @@ TEST_F(TestReluXInt8, Relu6) {
in_tensor.set_data(input_data);
out_tensor.set_data(output_data);
const lite::QuantArg quant_in = {0.0509804f, -69}; // -3.0 -- 10.0
const lite::QuantArg quant_out = {0.0392157f, -128}; // 0.0 -- 10.0
const lite::LiteQuantParam quant_in = {0.0509804f, -69}; // -3.0 -- 10.0
const lite::LiteQuantParam quant_out = {0.0392157f, -128}; // 0.0 -- 10.0
in_tensor.AddQuantParam(quant_in);
out_tensor.AddQuantParam(quant_out);

View File

@ -38,10 +38,10 @@ TEST_F(TestReshapeInt8, reshape_quant0) {
int8_t output[12];
std::vector<int> output_shape = {2, 6};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 1.0;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 1.0;
output_quant_arg.zeroPoint = 0;
@ -100,10 +100,10 @@ TEST_F(TestReshapeInt8, reshape_quant1_thread2) {
int8_t output[12];
std::vector<int> output_shape = {2, 6};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 1.0;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 2.0;
output_quant_arg.zeroPoint = 1;

View File

@ -23,7 +23,7 @@
#include "nnacl/int8/resize_int8.h"
namespace mindspore {
using mindspore::lite::QuantArg;
using mindspore::lite::LiteQuantParam;
using mindspore::lite::Tensor;
class TestResizeBilinearInt8 : public mindspore::CommonTest {
@ -31,8 +31,8 @@ class TestResizeBilinearInt8 : public mindspore::CommonTest {
TestResizeBilinearInt8() = default;
void TearDown() override;
void Prepare(const std::vector<int> &in_shape, const std::vector<int> &out_shape, int8_t *input_data,
int8_t *output_data, const QuantArg quant_in, const QuantArg quant_out, const bool align_corners,
const int thread_num);
int8_t *output_data, const LiteQuantParam quant_in, const LiteQuantParam quant_out,
const bool align_corners, const int thread_num);
std::vector<lite::Tensor *> inputs;
std::vector<lite::Tensor *> outputs;
ResizeParameter param_ = {};
@ -52,8 +52,8 @@ void TestResizeBilinearInt8::TearDown() {
}
void TestResizeBilinearInt8::Prepare(const std::vector<int> &in_shape, const std::vector<int> &out_shape,
int8_t *input_data, int8_t *output_data, const mindspore::QuantArg quant_in,
const mindspore::QuantArg quant_out, const bool align_corners,
int8_t *input_data, int8_t *output_data, const mindspore::LiteQuantParam quant_in,
const mindspore::LiteQuantParam quant_out, const bool align_corners,
const int thread_num) {
in_tensor.set_data_type(kNumberTypeInt8);
in_tensor.set_shape(in_shape);
@ -89,8 +89,8 @@ TEST_F(TestResizeBilinearInt8, Bilinear0) {
int8_t output_data[16] = {0};
std::vector<int> in_shape = {1, 2, 2, 1};
std::vector<int> out_shape = {1, 4, 4, 1};
const lite::QuantArg quant_in = {0.005f, 0};
const lite::QuantArg quant_out = {0.008f, 0};
const lite::LiteQuantParam quant_in = {0.005f, 0};
const lite::LiteQuantParam quant_out = {0.008f, 0};
bool align_corners = false;
int thread_num = 1;
int8_t expect[16] = {0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 2, 2, 2};
@ -108,8 +108,8 @@ TEST_F(TestResizeBilinearInt8, Bilinear1) {
int8_t input_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39};
int8_t output_data[160] = {0};
const lite::QuantArg quant_in = {0.005f, 0};
const lite::QuantArg quant_out = {0.008f, 0};
const lite::LiteQuantParam quant_in = {0.005f, 0};
const lite::LiteQuantParam quant_out = {0.008f, 0};
int thread_num = 1;
bool align_corners = false;
int8_t expect[160] = {0, 1, 1, 2, 2, 2, 2, 3, 3, 4, 3, 4, 4, 5, 6, 3, 4, 4, 5, 6, 3, 4, 4,
@ -134,8 +134,8 @@ TEST_F(TestResizeBilinearInt8, Bilinear2) {
20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39};
int8_t output_data[160] = {0};
const lite::QuantArg quant_in = {0.005f, 0};
const lite::QuantArg quant_out = {0.008f, 0};
const lite::LiteQuantParam quant_in = {0.005f, 0};
const lite::LiteQuantParam quant_out = {0.008f, 0};
int thread_num = 2;
bool align_corners = true;
int8_t expect[160] = {0, 1, 1, 2, 2, 1, 2, 2, 3, 4, 2, 3, 3, 4, 5, 3, 4, 4, 5, 6, 2, 3, 3,
@ -160,8 +160,8 @@ TEST_F(TestResizeBilinearInt8, Bilinear3) {
20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39};
int8_t output_data[160] = {0};
const lite::QuantArg quant_in = {0.005f, 2};
const lite::QuantArg quant_out = {0.005f, 2};
const lite::LiteQuantParam quant_in = {0.005f, 2};
const lite::LiteQuantParam quant_out = {0.005f, 2};
int thread_num = 2;
bool align_corners = true;
int8_t expect[160] = {0, 1, 2, 3, 4, 2, 3, 4, 5, 6, 3, 4, 5, 6, 7, 5, 6, 7, 8, 9, 3, 4, 5,

View File

@ -23,15 +23,15 @@
#include "nnacl/int8/resize_int8.h"
namespace mindspore {
using mindspore::lite::QuantArg;
using mindspore::lite::LiteQuantParam;
using mindspore::lite::Tensor;
class TestResizeNearestNeighborInt8 : public mindspore::CommonTest {
public:
TestResizeNearestNeighborInt8() = default;
void Prepare(const std::vector<int> &in_shape, const std::vector<int> &out_shape, int8_t *input_data,
int8_t *output_data, const QuantArg quant_in, const QuantArg quant_out, const bool align_corners,
const int thread_num);
int8_t *output_data, const LiteQuantParam quant_in, const LiteQuantParam quant_out,
const bool align_corners, const int thread_num);
void TearDown() override;
std::vector<lite::Tensor *> inputs;
@ -48,8 +48,9 @@ class TestResizeNearestNeighborInt8 : public mindspore::CommonTest {
};
void TestResizeNearestNeighborInt8::Prepare(const std::vector<int> &in_shape, const std::vector<int> &out_shape,
int8_t *input_data, int8_t *output_data, const QuantArg quant_in,
const QuantArg quant_out, const bool align_corners, const int thread_num) {
int8_t *input_data, int8_t *output_data, const LiteQuantParam quant_in,
const LiteQuantParam quant_out, const bool align_corners,
const int thread_num) {
in_tensor.set_data_type(kNumberTypeInt8);
in_tensor.set_shape(in_shape);
in_tensor.set_data(input_data);
@ -88,8 +89,8 @@ void TestResizeNearestNeighborInt8::TearDown() {
TEST_F(TestResizeNearestNeighborInt8, NearestNeighbor0) {
std::vector<int> in_shape = {1, 2, 2, 1};
std::vector<int> out_shape = {1, 4, 4, 1};
QuantArg quant_in = {0.00390625, 2};
QuantArg quant_out = {0.015625, 5};
LiteQuantParam quant_in = {0.00390625, 2};
LiteQuantParam quant_out = {0.015625, 5};
int8_t input_data[] = {0, 1, 2, 3};
const int out_element_num = 16;
int8_t output_data[out_element_num] = {0};
@ -108,8 +109,8 @@ TEST_F(TestResizeNearestNeighborInt8, NearestNeighbor0) {
TEST_F(TestResizeNearestNeighborInt8, NearestNeighbor1) {
std::vector<int> in_shape = {2, 2, 2, 5};
std::vector<int> out_shape = {2, 4, 4, 5};
QuantArg quant_in = {0.00390625, 2};
QuantArg quant_out = {0.015625, 5};
LiteQuantParam quant_in = {0.00390625, 2};
LiteQuantParam quant_out = {0.015625, 5};
int8_t input_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39};
const int out_element_num = 160;
@ -134,8 +135,8 @@ TEST_F(TestResizeNearestNeighborInt8, NearestNeighbor1) {
TEST_F(TestResizeNearestNeighborInt8, NearestNeighbor2) {
std::vector<int> in_shape = {2, 2, 2, 5};
std::vector<int> out_shape = {2, 4, 4, 5};
QuantArg quant_in = {0.00390625, 2};
QuantArg quant_out = {0.015625, 5};
LiteQuantParam quant_in = {0.00390625, 2};
LiteQuantParam quant_out = {0.015625, 5};
int8_t input_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39};
const int out_element_num = 160;
@ -161,8 +162,8 @@ TEST_F(TestResizeNearestNeighborInt8, NearestNeighbor2) {
TEST_F(TestResizeNearestNeighborInt8, NearestNeighbor3) {
std::vector<int> in_shape = {2, 2, 2, 5};
std::vector<int> out_shape = {2, 4, 4, 5};
QuantArg quant_in = {0.00390625, 2};
QuantArg quant_out = {0.00390625, 2};
LiteQuantParam quant_in = {0.00390625, 2};
LiteQuantParam quant_out = {0.00390625, 2};
int8_t input_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39};
const int out_element_num = 160;
@ -187,8 +188,8 @@ TEST_F(TestResizeNearestNeighborInt8, NearestNeighbor3) {
TEST_F(TestResizeNearestNeighborInt8, NearestNeighbor4) {
std::vector<int> in_shape = {2, 2, 2, 5};
std::vector<int> out_shape = {2, 4, 4, 5};
QuantArg quant_in = {0.00390625, 2};
QuantArg quant_out = {0.00390625, 2};
LiteQuantParam quant_in = {0.00390625, 2};
LiteQuantParam quant_out = {0.00390625, 2};
int8_t input_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39};
const int out_element_num = 160;

View File

@ -22,7 +22,7 @@
#include "nnacl/int8/scale_int8.h"
namespace mindspore {
using mindspore::lite::QuantArg;
using mindspore::lite::LiteQuantParam;
using mindspore::lite::Tensor;
class TestScaleInt8 : public mindspore::CommonTest {
@ -47,10 +47,10 @@ class TestScaleInt8 : public mindspore::CommonTest {
kernel::KernelCreator creator_ = nullptr;
lite::InnerContext ctx_ = lite::InnerContext();
kernel::InnerKernel *kernel_ = nullptr;
const QuantArg quant_in_ = {0.005f, 5};
const QuantArg quant_scale_ = {0.1f, 1};
const QuantArg quant_bias_ = {0.002f, 2};
const QuantArg quant_out_ = {0.01f, 1};
const LiteQuantParam quant_in_ = {0.005f, 5};
const LiteQuantParam quant_scale_ = {0.1f, 1};
const LiteQuantParam quant_bias_ = {0.002f, 2};
const LiteQuantParam quant_out_ = {0.01f, 1};
float err_tol_ = 0.05;
};

View File

@ -36,8 +36,8 @@ TEST_F(TestSigmoidInt8, Sigmoid) {
in_tensor.set_data(input_data);
out_tensor.set_data(output_data);
const lite::QuantArg quant_in = {1.0, 0}; // -4.0 -- 7.0
const lite::QuantArg quant_out = {1.0, 0}; // -3.0 -- 7.0
const lite::LiteQuantParam quant_in = {1.0, 0}; // -4.0 -- 7.0
const lite::LiteQuantParam quant_out = {1.0, 0}; // -3.0 -- 7.0
in_tensor.AddQuantParam(quant_in);
out_tensor.AddQuantParam(quant_out);

View File

@ -43,8 +43,8 @@ TEST_F(TestSliceInt8, SliceInt8) {
int size_data[4] = {1, 2, 2, 3};
size_tensor.set_data(size_data);
const lite::QuantArg quant_in0 = {0.00784314f, 0}; // -1.0--1.0 -> 0--255
const lite::QuantArg quant_out = {0.00784314f, 0};
const lite::LiteQuantParam quant_in0 = {0.00784314f, 0}; // -1.0--1.0 -> 0--255
const lite::LiteQuantParam quant_out = {0.00784314f, 0};
in_tensor.AddQuantParam(quant_in0);
out_tensor.AddQuantParam(quant_out);
@ -95,8 +95,8 @@ TEST_F(TestSliceInt8, Slice5D) {
int size_data[5] = {1, 1, 2, 2, 3};
size_tensor.set_data(size_data);
const lite::QuantArg quant_in0 = {0.00784314f, 0}; // -1.0--1.0 -> 0--255
const lite::QuantArg quant_out = {0.00784314f, 0};
const lite::LiteQuantParam quant_in0 = {0.00784314f, 0}; // -1.0--1.0 -> 0--255
const lite::LiteQuantParam quant_out = {0.00784314f, 0};
in_tensor.AddQuantParam(quant_in0);
out_tensor.AddQuantParam(quant_out);
@ -147,8 +147,8 @@ TEST_F(TestSliceInt8, Slice6D) {
int size_data[6] = {1, 1, 1, 2, 2, 3};
size_tensor.set_data(size_data);
const lite::QuantArg quant_in0 = {0.00784314f, 0}; // -1.0--1.0 -> 0--255
const lite::QuantArg quant_out = {0.00784314f, 0};
const lite::LiteQuantParam quant_in0 = {0.00784314f, 0}; // -1.0--1.0 -> 0--255
const lite::LiteQuantParam quant_out = {0.00784314f, 0};
in_tensor.AddQuantParam(quant_in0);
out_tensor.AddQuantParam(quant_out);
@ -199,8 +199,8 @@ TEST_F(TestSliceInt8, Slice7D) {
int size_data[7] = {1, 1, 1, 1, 2, 2, 3};
size_tensor.set_data(size_data);
const lite::QuantArg quant_in0 = {0.00784314f, 0}; // -1.0--1.0 -> 0--255
const lite::QuantArg quant_out = {0.00784314f, 0};
const lite::LiteQuantParam quant_in0 = {0.00784314f, 0}; // -1.0--1.0 -> 0--255
const lite::LiteQuantParam quant_out = {0.00784314f, 0};
in_tensor.AddQuantParam(quant_in0);
out_tensor.AddQuantParam(quant_out);
@ -252,8 +252,8 @@ TEST_F(TestSliceInt8, Slice8D) {
in_tensor.set_data(input_data);
out_tensor.set_data(output_data);
const lite::QuantArg quant_in0 = {0.00784314f, 0}; // -1.0--1.0 -> 0--255
const lite::QuantArg quant_out = {0.00784314f, 0};
const lite::LiteQuantParam quant_in0 = {0.00784314f, 0}; // -1.0--1.0 -> 0--255
const lite::LiteQuantParam quant_out = {0.00784314f, 0};
in_tensor.AddQuantParam(quant_in0);
out_tensor.AddQuantParam(quant_out);
@ -305,8 +305,8 @@ TEST_F(TestSliceInt8, SliceDiffQuantArgs) {
in_tensor.set_data(input_data);
out_tensor.set_data(output_data);
const lite::QuantArg quant_in0 = {0.00784314f, 0}; // -1.0--1.0 -> 0--255
const lite::QuantArg quant_out = {0.01568628f, 0};
const lite::LiteQuantParam quant_in0 = {0.00784314f, 0}; // -1.0--1.0 -> 0--255
const lite::LiteQuantParam quant_out = {0.01568628f, 0};
in_tensor.AddQuantParam(quant_in0);
out_tensor.AddQuantParam(quant_out);
@ -358,8 +358,8 @@ TEST_F(TestSliceInt8, SliceSingleThread) {
in_tensor.set_data(input_data);
out_tensor.set_data(output_data);
const lite::QuantArg quant_in0 = {0.00784314f, 0}; // -1.0--1.0 -> 0--255
const lite::QuantArg quant_out = {0.00784314f, 0};
const lite::LiteQuantParam quant_in0 = {0.00784314f, 0}; // -1.0--1.0 -> 0--255
const lite::LiteQuantParam quant_out = {0.00784314f, 0};
in_tensor.AddQuantParam(quant_in0);
out_tensor.AddQuantParam(quant_out);
@ -413,8 +413,8 @@ TEST_F(TestSliceInt8, Slice4Thread) {
in_tensor.set_data(input_data);
out_tensor.set_data(output_data);
const lite::QuantArg quant_in0 = {0.00784314f, 0}; // -1.0--1.0 -> 0--255
const lite::QuantArg quant_out = {0.00784314f, 0};
const lite::LiteQuantParam quant_in0 = {0.00784314f, 0}; // -1.0--1.0 -> 0--255
const lite::LiteQuantParam quant_out = {0.00784314f, 0};
in_tensor.AddQuantParam(quant_in0);
out_tensor.AddQuantParam(quant_out);

View File

@ -42,10 +42,10 @@ TEST_F(TestSoftmaxInt8, SoftmaxInt8) {
op_param.input_shape_[2] = 3;
op_param.input_shape_[3] = 4;
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 0.0352941;
input_quant_arg.zeroPoint = -128;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 0.00392157;
output_quant_arg.zeroPoint = -128;

View File

@ -43,10 +43,10 @@ TEST_F(TestSplitInt8, Split_quant0_thread2) {
std::vector<int> output1_shape = {2, 1, 2};
std::vector<int> output2_shape = {2, 2, 2};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 1.0;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 1.0;
output_quant_arg.zeroPoint = 0;
@ -130,10 +130,10 @@ TEST_F(TestSplitInt8, Split_quant0_thread2_num) {
std::vector<int> output2_shape = {2, 1, 2};
std::vector<int> output3_shape = {2, 1, 2};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 1.0;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 1.0;
output_quant_arg.zeroPoint = 0;
@ -226,10 +226,10 @@ TEST_F(TestSplitInt8, Split_quant1_thread2_num) {
std::vector<int> output2_shape = {2, 1, 2};
std::vector<int> output3_shape = {2, 1, 2};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 1.0;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 2.0;
output_quant_arg.zeroPoint = 0;

View File

@ -39,10 +39,10 @@ TEST_F(TestSqueezeInt8, Squeeze_1d_axis0_offset0_quant0_thread2) {
const int output_size = 8;
int8_t output[8];
std::vector<int> output_shape = {8};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 1.0;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 1.0;
output_quant_arg.zeroPoint = 0;

View File

@ -40,9 +40,9 @@ TEST_F(TestSubInt8, SubInt8) {
in_tensor1.set_data(input_data1);
out_tensor.set_data(output_data);
const lite::QuantArg quant_in0 = {0.00784314f, 0}; // -1.0--1.0 -> 0--255
const lite::QuantArg quant_in1 = {0.00784314f, 0};
const lite::QuantArg quant_out = {0.00784314f, 0};
const lite::LiteQuantParam quant_in0 = {0.00784314f, 0}; // -1.0--1.0 -> 0--255
const lite::LiteQuantParam quant_in1 = {0.00784314f, 0};
const lite::LiteQuantParam quant_out = {0.00784314f, 0};
in_tensor0.AddQuantParam(quant_in0);
in_tensor1.AddQuantParam(quant_in1);
out_tensor.AddQuantParam(quant_out);
@ -89,9 +89,9 @@ TEST_F(TestSubInt8, SubInt8T2) {
in_tensor1.set_data(input_data1);
out_tensor.set_data(output_data);
const lite::QuantArg quant_in0 = {0.00784314f, 0}; // -1.0--1.0 -> 0--255
const lite::QuantArg quant_in1 = {0.00784314f, 0};
const lite::QuantArg quant_out = {0.00784314f, 0};
const lite::LiteQuantParam quant_in0 = {0.00784314f, 0}; // -1.0--1.0 -> 0--255
const lite::LiteQuantParam quant_in1 = {0.00784314f, 0};
const lite::LiteQuantParam quant_out = {0.00784314f, 0};
in_tensor0.AddQuantParam(quant_in0);
in_tensor1.AddQuantParam(quant_in1);
out_tensor.AddQuantParam(quant_out);

View File

@ -38,10 +38,10 @@ TEST_F(TestUnsqueezeInt8, Unsqueeze_1) {
const int output_size = 8;
int8_t output[8];
std::vector<int> output_shape = {8, 1};
lite::QuantArg input_quant_arg;
lite::LiteQuantParam input_quant_arg;
input_quant_arg.scale = 1.0;
input_quant_arg.zeroPoint = 0;
lite::QuantArg output_quant_arg;
lite::LiteQuantParam output_quant_arg;
output_quant_arg.scale = 1.0;
output_quant_arg.zeroPoint = 0;

View File

@ -272,7 +272,7 @@ lite::STATUS CopyQuantParams(const CNodePtr &cnode, const std::vector<Tensor *>
auto input_quant_params = quant_param_holder->get_input_quant_params();
for (size_t m = 0; m < input_quant_params.size(); m++) {
for (auto inputQuantParam : input_quant_params[m]) {
lite::QuantArg quant_arg{};
lite::LiteQuantParam quant_arg{};
quant_arg.scale = inputQuantParam.scale;
quant_arg.zeroPoint = inputQuantParam.zeroPoint;
quant_arg.roundType = inputQuantParam.roundType;
@ -283,7 +283,7 @@ lite::STATUS CopyQuantParams(const CNodePtr &cnode, const std::vector<Tensor *>
auto output_quant_params = quant_param_holder->get_output_quant_params();
for (size_t m = 0; m < output_quant_params.size(); m++) {
for (auto outputQuantParam : output_quant_params[m]) {
lite::QuantArg quant_arg{};
lite::LiteQuantParam quant_arg{};
quant_arg.scale = outputQuantParam.scale;
quant_arg.zeroPoint = outputQuantParam.zeroPoint;
quant_arg.roundType = outputQuantParam.roundType;