!23857 [MSLITE] fix some magic numbers in codegen

Merge pull request !23857 from zhanyuan/dev
This commit is contained in:
i-robot 2021-09-23 02:33:17 +00:00 committed by Gitee
commit 5be36146c5
6 changed files with 9 additions and 7 deletions

View File

@@ -34,5 +34,4 @@ std::string KernelRegistry::GenKernelInterface(const char *func, const char *par
return "int " + std::string(func) + "(TensorC *inputs, int input_num, TensorC *outputs, int output_num, " +
std::string(param) + " *param);";
}
} // namespace mindspore::lite::micro

View File

@@ -93,7 +93,7 @@ int ArithmeticFP32Coder::ReSize(CoderContext *const context) {
if (arithmetic_parameter_->broadcasting_) {
outside_ = 1;
int resize_n_index = static_cast<int>(arithmetic_parameter_->ndim_) - 1;
if (resize_n_index < 0 || resize_n_index >= 10) {
if (resize_n_index < 0 || resize_n_index >= static_cast<int>(max_dims_)) {
return RET_ERROR;
}
for (auto i = resize_n_index; i >= 0; --i) {
@@ -149,7 +149,7 @@ bool ArithmeticFP32Coder::IsBatchScalarCalc() {
return false;
}
size_t break_axis = 0;
MS_CHECK_TRUE_RET(arithmetic_parameter_->ndim_ <= 10, false);
MS_CHECK_TRUE_RET(arithmetic_parameter_->ndim_ <= max_dims_, false);
for (size_t i = 0; i < arithmetic_parameter_->ndim_; i++) {
if (arithmetic_parameter_->in_shape0_[i] != arithmetic_parameter_->in_shape1_[i]) {
break_axis = i;
@@ -250,6 +250,7 @@ int ArithmeticFP32Coder::Prepare(CoderContext *const context) {
MS_CHECK_RET_CODE(CheckDataType(), "ArithmeticFP32Coder check datatype fail");
MS_CHECK_PTR(parameter_);
arithmetic_parameter_ = reinterpret_cast<ArithmeticParameter *>(parameter_);
max_dims_ = sizeof(arithmetic_parameter_->in_shape0_) / sizeof(int);
auto primitive_type = arithmetic_parameter_->op_parameter_.type_;
if (primitive_type == schema::PrimitiveType_Eltwise) {
switch (arithmetic_parameter_->eltwise_mode_) {

View File

@@ -133,6 +133,8 @@ class ArithmeticFP32Coder final : public OperatorCoder {
int data_type_len_{0};
size_t max_dims_{10};
bool input0_broadcast_{false};
bool input1_broadcast_{false};

View File

@@ -24,7 +24,7 @@
namespace mindspore::lite::micro::nnacl {
int DivInt8Coder::Prepare(CoderContext *context) {
MS_CHECK_TRUE_RET(input_tensors_.size() == 2, RET_ERROR);
MS_CHECK_TRUE_RET(input_tensors_.size() == kInputSize1, RET_ERROR);
input0 = input_tensors_.at(0);
input1 = input_tensors_.at(1);
MS_ASSERT(input0);
@@ -56,7 +56,8 @@ int DivInt8Coder::DoCode(CoderContext *const context) {
if (broadcast_) {
ArithmeticParameter tile_para = {0};
tile_para.ndim_ = output_tensor_->shape().size();
MS_CHECK_TRUE_RET(tile_para.ndim_ <= 10, RET_ERROR);
size_t max_dim = sizeof(tile_para.in_shape0_) / sizeof(int);
MS_CHECK_TRUE_RET(tile_para.ndim_ <= max_dim, RET_ERROR);
for (size_t i = 0; i < tile_para.ndim_; i++) {
tile_para.in_shape0_[i] = input0->DimensionSize(i);
tile_para.in_shape1_[i] = input1->DimensionSize(i);

View File

@@ -37,7 +37,7 @@ int FullConnectionInt8Coder::Prepare(CoderContext *const context) {
// only support one thread currently
thread_count_ = thread_num_;
param_ = reinterpret_cast<MatMulParameter *>(parameter_);
MS_CHECK_TRUE_RET(input_tensors_.size() >= 2, RET_ERROR);
MS_CHECK_TRUE_RET(input_tensors_.size() >= kInputSize1, RET_ERROR);
filter_tensor_ = input_tensors_.at(kWeightIndex);
MS_CHECK_PTR(filter_tensor_);
if (input_tensors_.size() == kInputSize2) {

View File

@@ -23,7 +23,6 @@
using mindspore::schema::PrimitiveType_Transpose;
namespace mindspore::lite::micro::nnacl {
int TransposeInt8Coder::Prepare(CoderContext *const context) {
auto in_tensor = input_tensors_.front();
auto out_tensor = output_tensors_.front();