forked from mindspore-Ecosystem/mindspore

commit 0fa81a6516 ("code check")
parent 66a9142153
@@ -0,0 +1,27 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MINDSPORE_NNACL_WHILE_PARAMETER_H_
+#define MINDSPORE_NNACL_WHILE_PARAMETER_H_
+
+#include "nnacl/op_base.h"
+
+typedef struct WhileParemeter {
+  OpParameter op_parameter_;
+  int body_subgraph_index = 0;
+  int cond_subgraph_index = 0;
+} WhileParemeter;
+
+#endif  // MINDSPORE_NNACL_WHILE_PARAMETER_H_

@@ -21,7 +21,7 @@
 namespace mindspore {
 namespace lite {
 schema::Tensor *AttrToTensor(void *data, int data_size, bool is_array, TypeId type_id,
-                             std::vector<char *> *tensor_bufs) {
+                             std::vector<char *> *const tensor_bufs) {
   if (data == nullptr || tensor_bufs == nullptr) {
     MS_LOG(ERROR) << "the parameter of this function is nullptr.";
     return nullptr;

@@ -50,7 +50,8 @@ schema::Tensor *AttrToTensor(void *data, int data_size, bool is_array, TypeId ty
     fbb.Clear();
     return nullptr;
   }
-  auto tensor_buf = reinterpret_cast<char *>(malloc(fbb.GetSize()));
+  size_t byte_num = fbb.GetSize();
+  auto tensor_buf = reinterpret_cast<char *>(malloc(byte_num));
   if (tensor_buf == nullptr) {
     MS_LOG(ERROR) << "malloc primitive_buf_ failed";
     fbb.Clear();

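Most hunks in this commit make one mechanical change: pointer parameters gain a top-level `const` (`T *const p`), so the pointer itself cannot be reseated inside the function while the data it points to stays writable. A minimal sketch of the distinction, with hypothetical names:

```cpp
#include <vector>

// `bufs` is a const pointer to a non-const vector: the parameter itself
// cannot be reassigned, but the vector it points to is still mutable.
int AppendBuf(std::vector<char *> *const bufs, char *buf) {
  // bufs = nullptr;     // would not compile: bufs itself is const
  bufs->push_back(buf);  // fine: the pointee is non-const
  return 0;
}
```

Callers are unaffected: top-level const on a parameter is an implementation detail and does not even participate in the function's type.
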
@@ -27,7 +27,7 @@
 namespace mindspore {
 namespace lite {
 schema::Tensor *AttrToTensor(void *data, int data_size, bool is_array, TypeId type_id,
-                             std::vector<char *> *tensor_bufs);
+                             std::vector<char *> *const tensor_bufs);
 } // namespace lite
 } // namespace mindspore

@@ -29,7 +29,7 @@ namespace mindspore {
 namespace lite {
 // compatibility, transfer attr to input tensor.
 typedef int (*TransferAttrFunc)(Model::Node *node, std::vector<schema::Tensor *> *tensor,
-                                std::vector<char *> *tensor_bufs);
+                                std::vector<char *> *const tensor_bufs);
 class CompatRegistry {
  public:
  static CompatRegistry *GetInstance() {

@@ -20,7 +20,7 @@
 namespace mindspore {
 namespace lite {
 int TransferBroadcastToAttr(Model::Node *node, std::vector<schema::Tensor *> *dst_tensors,
-                            std::vector<char *> *tensor_bufs) {
+                            std::vector<char *> *const tensor_bufs) {
   if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) {
     MS_LOG(ERROR) << "the parameter of this function is nullptr.";
     return RET_ERROR;

@@ -19,7 +19,8 @@
 
 namespace mindspore {
 namespace lite {
-int TransferCastAttr(Model::Node *node, std::vector<schema::Tensor *> *dst_tensors, std::vector<char *> *tensor_bufs) {
+int TransferCastAttr(Model::Node *node, std::vector<schema::Tensor *> *dst_tensors,
+                     std::vector<char *> *const tensor_bufs) {
   if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) {
     MS_LOG(ERROR) << "the parameter of this function is nullptr.";
     return RET_ERROR;

@@ -20,7 +20,7 @@
 namespace mindspore {
 namespace lite {
 int TransferExpandDimsAttr(Model::Node *node, std::vector<schema::Tensor *> *dst_tensors,
-                           std::vector<char *> *tensor_bufs) {
+                           std::vector<char *> *const tensor_bufs) {
   if (node == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) {
     MS_LOG(ERROR) << "the parameter of this function is nullptr.";
     return RET_ERROR;

@@ -20,7 +20,7 @@
 namespace mindspore {
 namespace lite {
 int TransferFillToAttr(Model::Node *node, std::vector<schema::Tensor *> *dst_tensors,
-                       std::vector<char *> *tensor_bufs) {
+                       std::vector<char *> *const tensor_bufs) {
   if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) {
     MS_LOG(ERROR) << "the parameter of this function is nullptr.";
     return RET_ERROR;

@@ -20,7 +20,7 @@
 namespace mindspore {
 namespace lite {
 int TransferGatherAttr(Model::Node *node, std::vector<schema::Tensor *> *dst_tensors,
-                       std::vector<char *> *tensor_bufs) {
+                       std::vector<char *> *const tensor_bufs) {
   if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) {
     MS_LOG(ERROR) << "the parameter of this function is nullptr.";
     return RET_ERROR;

@@ -20,7 +20,7 @@
 namespace mindspore {
 namespace lite {
 int TransferNchw2NhwcAttr(Model::Node *node, std::vector<schema::Tensor *> *dst_tensors,
-                          std::vector<char *> *tensor_bufs) {
+                          std::vector<char *> *const tensor_bufs) {
   if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) {
     MS_LOG(ERROR) << "the parameter of this function is nullptr.";
     return RET_ERROR;

@@ -20,7 +20,7 @@
 namespace mindspore {
 namespace lite {
 int TransferNhwc2NchwAttr(Model::Node *node, std::vector<schema::Tensor *> *dst_tensors,
-                          std::vector<char *> *tensor_bufs) {
+                          std::vector<char *> *const tensor_bufs) {
   if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) {
     MS_LOG(ERROR) << "the parameter of this function is nullptr.";
     return RET_ERROR;

@@ -19,7 +19,8 @@
 
 namespace mindspore {
 namespace lite {
-int TransferPadAttr(Model::Node *node, std::vector<schema::Tensor *> *dst_tensors, std::vector<char *> *tensor_bufs) {
+int TransferPadAttr(Model::Node *node, std::vector<schema::Tensor *> *dst_tensors,
+                    std::vector<char *> *const tensor_bufs) {
   if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) {
     MS_LOG(ERROR) << "the parameter of this function is nullptr.";
     return RET_ERROR;

@@ -20,7 +20,7 @@
 namespace mindspore {
 namespace lite {
 int TransferPermuteAttr(Model::Node *node, std::vector<schema::Tensor *> *dst_tensors,
-                        std::vector<char *> *tensor_bufs) {
+                        std::vector<char *> *const tensor_bufs) {
   if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) {
     MS_LOG(ERROR) << "the parameter of this function is nullptr.";
     return RET_ERROR;

@@ -20,7 +20,7 @@
 namespace mindspore {
 namespace lite {
 int TransferPowerToAttr(Model::Node *node, std::vector<schema::Tensor *> *dst_tensors,
-                        std::vector<char *> *tensor_bufs) {
+                        std::vector<char *> *const tensor_bufs) {
   if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) {
     MS_LOG(ERROR) << "the parameter of this function is nullptr.";
     return RET_ERROR;

@@ -20,7 +20,7 @@
 namespace mindspore {
 namespace lite {
 int TransferReduceToAttr(Model::Node *node, std::vector<schema::Tensor *> *dst_tensors,
-                         std::vector<char *> *tensor_bufs) {
+                         std::vector<char *> *const tensor_bufs) {
   if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) {
     MS_LOG(ERROR) << "the parameter of this function is nullptr.";
     return RET_ERROR;

@@ -20,7 +20,7 @@
 namespace mindspore {
 namespace lite {
 int TransferReshapeAttr(Model::Node *node, std::vector<schema::Tensor *> *dst_tensors,
-                        std::vector<char *> *tensor_bufs) {
+                        std::vector<char *> *const tensor_bufs) {
   if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) {
     MS_LOG(ERROR) << "the parameter of this function is nullptr.";
     return RET_ERROR;

@@ -19,7 +19,8 @@
 
 namespace mindspore {
 namespace lite {
-int TransferSliceAttr(Model::Node *node, std::vector<schema::Tensor *> *dst_tensors, std::vector<char *> *tensor_bufs) {
+int TransferSliceAttr(Model::Node *node, std::vector<schema::Tensor *> *dst_tensors,
+                      std::vector<char *> *const tensor_bufs) {
   if (node == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) {
     MS_LOG(ERROR) << "the parameter of this function is nullptr.";
     return RET_ERROR;

@@ -20,7 +20,7 @@
 namespace mindspore {
 namespace lite {
 int TransferStridedSliceAttr(Model::Node *node, std::vector<schema::Tensor *> *dst_tensors,
-                             std::vector<char *> *tensor_bufs) {
+                             std::vector<char *> *const tensor_bufs) {
   if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) {
     MS_LOG(ERROR) << "the parameter of this function is nullptr.";
     return RET_ERROR;

@@ -20,7 +20,7 @@
 namespace mindspore {
 namespace lite {
 int TransferTileToAttr(Model::Node *node, std::vector<schema::Tensor *> *dst_tensors,
-                       std::vector<char *> *tensor_bufs) {
+                       std::vector<char *> *const tensor_bufs) {
   if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) {
     MS_LOG(ERROR) << "the parameter of this function is nullptr.";
     return RET_ERROR;

@@ -19,7 +19,8 @@
 
 namespace mindspore {
 namespace lite {
-int TransferTopkAttr(Model::Node *node, std::vector<schema::Tensor *> *dst_tensors, std::vector<char *> *tensor_bufs) {
+int TransferTopkAttr(Model::Node *node, std::vector<schema::Tensor *> *dst_tensors,
+                     std::vector<char *> *const tensor_bufs) {
   if (node == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) {
     MS_LOG(ERROR) << "the parameter of this function is nullptr.";
     return RET_ERROR;

@@ -20,7 +20,7 @@
 namespace mindspore {
 namespace lite {
 int TransferTransposeAttr(Model::Node *node, std::vector<schema::Tensor *> *dst_tensors,
-                          std::vector<char *> *tensor_bufs) {
+                          std::vector<char *> *const tensor_bufs) {
   if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) {
     MS_LOG(ERROR) << "the parameter of this function is nullptr.";
     return RET_ERROR;

@@ -53,7 +53,7 @@ OpParameter *PopulateConstantOfShapeParameter(const void *prim) {
       param->value_.f32_value_ = *(prim_val->begin());
       break;
     case kNumberTypeInt32:
-      param->value_.int32_value_ = *(prim_val->begin());
+      param->value_.int32_value_ = static_cast<int32_t>(*(prim_val->begin()));
       break;
     default:
       MS_LOG(ERROR) << "The value of constant of shape is invalid";

@@ -49,7 +49,7 @@ OpParameter *PopulateCropParameter(const void *prim) {
 
   param->op_parameter_.type_ = primitive->value_type();
   param->axis_ = value->axis();
-  param->offset_size_ = param_offset->size();
+  param->offset_size_ = static_cast<int>(param_offset->size());
   for (size_t i = 0; i < param_offset->size(); ++i) {
     param->offset_[i] = *(param_offset->begin() + i);
   }

@@ -20,8 +20,10 @@ using mindspore::schema::PrimitiveType_MaxPoolFusion;
 
 namespace mindspore {
 namespace lite {
-constexpr auto kMinShapeSize = 2;
-constexpr auto kMinPadSize = 4;
+constexpr size_t kMinShapeSize = 2;
+constexpr size_t kMinPadSize = 4;
+constexpr int kOffsetTwo = 2;
+constexpr int kOffsetThree = 3;
 OpParameter *PopulateAvgPoolParameter(const void *primitive) {
   auto pooling_prim = static_cast<const schema::Primitive *>(primitive);
   MS_ASSERT(pooling_prim != nullptr);

@@ -53,8 +55,8 @@ OpParameter *PopulateAvgPoolParameter(const void *primitive) {
   if (pad != nullptr && pad->size() >= kMinPadSize) {
     param->pad_u_ = static_cast<int>(*(pad->begin()));
     param->pad_d_ = static_cast<int>(*(pad->begin() + 1));
-    param->pad_l_ = static_cast<int>(*(pad->begin() + 2));
-    param->pad_r_ = static_cast<int>(*(pad->begin() + 3));
+    param->pad_l_ = static_cast<int>(*(pad->begin() + kOffsetTwo));
+    param->pad_r_ = static_cast<int>(*(pad->begin() + kOffsetThree));
   }
   if (!param->global_) {
     auto kernel_size = value->kernel_size();

@@ -138,8 +140,8 @@ OpParameter *PopulateMaxPoolParameter(const void *primitive) {
     if (pad != nullptr && pad->size() >= kMinPadSize) {
       param->pad_u_ = static_cast<int>(*(pad->begin()));
       param->pad_d_ = static_cast<int>(*(pad->begin() + 1));
-      param->pad_l_ = static_cast<int>(*(pad->begin() + 2));
-      param->pad_r_ = static_cast<int>(*(pad->begin() + 3));
+      param->pad_l_ = static_cast<int>(*(pad->begin() + kOffsetTwo));
+      param->pad_r_ = static_cast<int>(*(pad->begin() + kOffsetThree));
     }
   }
 
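The pooling hunks above swap raw literals (2, 3) for named constants and give the size thresholds a concrete `size_t` type; the same pattern recurs later with `kSecondInput`, `kHalfDivisor`, and the `kBuildInputFlag*` values. A small self-contained sketch of the idea, with hypothetical names:

```cpp
#include <cstddef>
#include <vector>

constexpr size_t kMinPadSize = 4;  // pad layout: {up, down, left, right}
constexpr int kOffsetTwo = 2;
constexpr int kOffsetThree = 3;

// Reads the left/right entries from a flat pad vector; returns false when
// the vector is too short to contain them.
bool ReadPads(const std::vector<int> &pad, int *left, int *right) {
  if (pad.size() < kMinPadSize) return false;
  *left = pad[kOffsetTwo];     // index 2, named so the intent survives review
  *right = pad[kOffsetThree];  // index 3
  return true;
}
```
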
@@ -59,10 +59,13 @@ OpParameter *PopulateSpaceToBatchNDParameter(const void *prim) {
     free(param);
     return nullptr;
   }
-  if (fb_paddings->size() == 0 ||
-      ((*(fb_paddings->begin())) != nullptr && (*(fb_paddings->begin()))->data() != nullptr &&
-       static_cast<uint64_t>(fb_paddings->size() * (*(fb_paddings->begin()))->data()->size()) >
-         std::numeric_limits<size_t>::max() / sizeof(int64_t))) {
+  if (fb_paddings->size() == 0 || *(fb_paddings->begin()) == nullptr || (*(fb_paddings->begin()))->data() == nullptr) {
+    MS_LOG(ERROR) << "exit attr is nullptr.";
+    free(param);
+    return nullptr;
+  }
+  size_t num = static_cast<size_t>(fb_paddings->size() * (*(fb_paddings->begin()))->data()->size());
+  if (num > std::numeric_limits<size_t>::max() / sizeof(int64_t)) {
     MS_LOG(ERROR) << "The value of paddings.size() is zero or too big";
     free(param);
     return nullptr;

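Both SpaceToBatch populate functions replace one overloaded condition with two explicit checks: reject null padding attributes first, then verify that the element count cannot overflow the byte computation `num * sizeof(int64_t)` before allocating. A standalone sketch of that guard, under the assumption that both factors come from untrusted flatbuffer metadata:

```cpp
#include <cstdint>
#include <cstdlib>
#include <limits>

// Allocates rows * cols int64_t elements, or returns nullptr when either the
// element count or the byte count would overflow size_t.
int64_t *AllocPaddings(size_t rows, size_t cols) {
  if (cols != 0 && rows > std::numeric_limits<size_t>::max() / cols) {
    return nullptr;  // rows * cols itself would wrap around
  }
  size_t num = rows * cols;
  if (num > std::numeric_limits<size_t>::max() / sizeof(int64_t)) {
    return nullptr;  // num * sizeof(int64_t) would wrap around
  }
  return static_cast<int64_t *>(malloc(num * sizeof(int64_t)));
}
```
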
@@ -62,10 +62,13 @@ OpParameter *PopulateSpaceToBatchParameter(const void *prim) {
     free(param);
     return nullptr;
   }
-  if (fb_paddings->size() == 0 ||
-      ((*(fb_paddings->begin())) != nullptr && (*(fb_paddings->begin()))->data() != nullptr &&
-       static_cast<uint64_t>(fb_paddings->size() * (*(fb_paddings->begin()))->data()->size()) >
-         std::numeric_limits<size_t>::max() / sizeof(int64_t))) {
+  if (fb_paddings->size() == 0 || *(fb_paddings->begin()) == nullptr || (*(fb_paddings->begin()))->data() == nullptr) {
+    MS_LOG(ERROR) << "exit attr is nullptr.";
+    free(param);
+    return nullptr;
+  }
+  size_t num = static_cast<size_t>(fb_paddings->size() * (*(fb_paddings->begin()))->data()->size());
+  if (num > std::numeric_limits<size_t>::max() / sizeof(int64_t)) {
     MS_LOG(ERROR) << "The value of paddings.size() is zero or too big";
     free(param);
     return nullptr;

@@ -44,6 +44,11 @@ OpParameter *PopulateSpliceParameter(const void *prim) {
     return nullptr;
   }
   std::vector<int> primitive_context(context->begin(), context->end());
+  if (primitive_context.size() > std::numeric_limits<int>::max()) {
+    MS_LOG(ERROR) << "size is too big.";
+    free(param);
+    return nullptr;
+  }
   param->context_dim_ = static_cast<int>(primitive_context.size());
 
   // malloc && memset for context

@@ -57,7 +62,7 @@ OpParameter *PopulateSpliceParameter(const void *prim) {
   int src_to_dst_row_offset = INT32_MIN;
   memset(param->context_, 0, param->context_dim_ * sizeof(int));
   for (int i = 0; i < param->context_dim_; ++i) {
-    param->context_[i] = primitive_context.at(i);
+    param->context_[i] = primitive_context[i];
     src_to_dst_row_offset = std::max(src_to_dst_row_offset, std::abs(primitive_context.at(i)));
   }
 
@@ -69,6 +74,12 @@ OpParameter *PopulateSpliceParameter(const void *prim) {
     return nullptr;
   }
   std::vector<int> primitive_forward_indexes(forward_indexes->begin(), forward_indexes->end());
+  if (primitive_forward_indexes.size() > std::numeric_limits<int>::max()) {
+    MS_LOG(ERROR) << "size is too big.";
+    free(param->context_);
+    free(param);
+    return nullptr;
+  }
   param->forward_indexes_dim_ = static_cast<int>(primitive_forward_indexes.size());
 
   // malloc && memset for forward_indexes

@@ -81,10 +92,6 @@ OpParameter *PopulateSpliceParameter(const void *prim) {
   }
   memset(param->forward_indexes_, 0, param->forward_indexes_dim_ * sizeof(int));
   memcpy(param->forward_indexes_, primitive_forward_indexes.data(), param->forward_indexes_dim_ * sizeof(int));
-
-  for (int i = 0; i < param->context_dim_; ++i) {
-    param->context_[i] = primitive_context.at(i);
-  }
   param->output_dim_ = value->output_dim();
   return reinterpret_cast<OpParameter *>(param);
 }

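The Splice changes add a fits-in-int check before narrowing a `size_t` count into an `int` dimension field, and drop a second loop that re-copied `primitive_context` into `param->context_` after the earlier loop had already done so. The guard reduces to something like this (hypothetical helper):

```cpp
#include <limits>
#include <vector>

// True when the vector's size can be stored losslessly in an int field.
bool FitsInInt(const std::vector<int> &v) {
  return v.size() <= static_cast<size_t>(std::numeric_limits<int>::max());
}
```

With the guard in place, the subsequent `static_cast<int>(v.size())` is known not to truncate.
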
@@ -52,7 +52,7 @@ OpParameter *PopulateConstantOfShapeParameter(const void *prim) {
       param->value_.f32_value_ = value->data()[0];
       break;
     case kNumberTypeInt32:
-      param->value_.int32_value_ = value->data()[0];
+      param->value_.int32_value_ = static_cast<int32_t>(value->data()[0]);
       break;
     default:
       MS_LOG(ERROR) << "The value of constant of shape is invalid";

@@ -46,7 +46,7 @@ OpParameter *PopulateCropParameter(const void *prim) {
   memset(crop_param, 0, sizeof(CropParameter));
   crop_param->op_parameter_.type_ = schema::PrimitiveType_Crop;
   crop_param->axis_ = crop_prim->axis();
-  crop_param->offset_size_ = param_offset->size();
+  crop_param->offset_size_ = static_cast<int>(param_offset->size());
   for (size_t i = 0; i < param_offset->size(); ++i) {
     crop_param->offset_[i] = *(param_offset->begin() + i);
   }

@@ -41,8 +41,8 @@ OpParameter *PopulateLayerNormParameterV0(const void *prim) {
   }
   memset(layer_norm_parameter, 0, sizeof(LayerNormParameter));
   layer_norm_parameter->op_parameter_.type_ = schema::PrimitiveType_LayerNormFusion;
-  layer_norm_parameter->begin_norm_axis_ = -normalized_shape->size();
-  layer_norm_parameter->begin_params_axis_ = -normalized_shape->size();
+  layer_norm_parameter->begin_norm_axis_ = -static_cast<int>(normalized_shape->size());
+  layer_norm_parameter->begin_params_axis_ = -static_cast<int>(normalized_shape->size());
   layer_norm_parameter->epsilon_ = layer_norm_prim->epsilon();
   layer_norm_parameter->elementwise_affine_ = layer_norm_prim->elementwiseAffine();
 
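The LayerNorm cast deserves a note: unary minus applied to an unsigned `size()` wraps around instead of negating, and only the later implicit narrowing happens to recover a negative value on common platforms. Casting to int before negating makes the arithmetic signed and the intent explicit. A minimal demonstration (std::vector stands in for the flatbuffers vector):

```cpp
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> shape{8, 16, 32};
  // Unary minus on an unsigned value wraps: with a 64-bit size_t and
  // size() == 3 this is 2^64 - 3, not -3.
  size_t wrapped = -shape.size();
  // Convert first, then negate: well-defined signed arithmetic.
  int begin_axis = -static_cast<int>(shape.size());
  printf("%zu vs %d\n", wrapped, begin_axis);  // 18446744073709551613 vs -3
  return 0;
}
```
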
@@ -16,16 +16,11 @@
 
 #include "schema/model_v0_generated.h"
 #include "src/ops/populate/populate_register.h"
+#include "nnacl/partial_fusion_parameter.h"
 
 namespace mindspore {
 namespace lite {
 namespace {
-struct PartialParameter {
-  OpParameter op_parameter_;
-  int sub_graph_index_ = 0;
-};
-using PartialParameter = PartialParameter;
-
 OpParameter *PopulatePartialParameter(const void *prim) {
   auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
   MS_ASSERT(primitive != nullptr);

@@ -48,7 +48,7 @@ OpParameter *PopulatePriorBoxParameter(const void *prim) {
     free(prior_box_param);
     return nullptr;
   }
-  prior_box_param->min_sizes_size = min_sizes->size();
+  prior_box_param->min_sizes_size = static_cast<int32_t>(min_sizes->size());
   memcpy(prior_box_param->min_sizes, min_sizes->data(), min_sizes->size() * sizeof(int32_t));
 
   auto max_sizes = prior_box_prim->max_sizes();

@@ -62,7 +62,7 @@ OpParameter *PopulatePriorBoxParameter(const void *prim) {
     free(prior_box_param);
     return nullptr;
   }
-  prior_box_param->max_sizes_size = max_sizes->size();
+  prior_box_param->max_sizes_size = static_cast<int32_t>(max_sizes->size());
   memcpy(prior_box_param->max_sizes, max_sizes->data(), max_sizes->size() * sizeof(int32_t));
 
   auto aspect_ratios = prior_box_prim->aspect_ratios();

@@ -76,7 +76,7 @@ OpParameter *PopulatePriorBoxParameter(const void *prim) {
     free(prior_box_param);
     return nullptr;
   }
-  prior_box_param->aspect_ratios_size = aspect_ratios->size();
+  prior_box_param->aspect_ratios_size = static_cast<int32_t>(aspect_ratios->size());
   memcpy(prior_box_param->aspect_ratios, aspect_ratios->data(), aspect_ratios->size() * sizeof(float));
 
   auto variances = prior_box_prim->variances();

@@ -37,7 +37,7 @@ OpParameter *PopulateReduceParameter(const void *prim) {
   memset(reduce_param, 0, sizeof(ReduceParameter));
   reduce_param->op_parameter_.type_ = schema::PrimitiveType_ReduceFusion;
 
-  reduce_param->keep_dims_ = reduce_prim->keepDims();
+  reduce_param->keep_dims_ = static_cast<bool>(reduce_prim->keepDims());
   reduce_param->reduce_to_end_ = reduce_prim->reduceToEnd();
   reduce_param->coeff = reduce_prim->coeff();
   auto axisVector = reduce_prim->axes();

@@ -42,7 +42,7 @@ OpParameter *PopulateReverseParameter(const void *prim) {
     free(reverse_param);
     return nullptr;
   }
-  reverse_param->num_axis_ = flatAxis->size();
+  reverse_param->num_axis_ = static_cast<int>(flatAxis->size());
   int i = 0;
   for (int flatAxi : *flatAxis) {
     reverse_param->axis_[i++] = flatAxi;

@@ -45,7 +45,7 @@ OpParameter *PopulateSliceParameter(const void *prim) {
       return nullptr;
     }
     for (size_t i = 0; i < param_axis->size(); ++i) {
-      slice_param->axis_[i] = param_axis->Get(i);
+      slice_param->axis_[i] = static_cast<int32_t>(param_axis->Get(i));
     }
   } else {
     // use default axes

@@ -42,31 +42,26 @@ OpParameter *PopulateSpaceToBatchParameter(const void *prim) {
     free(space_batch_param);
     return nullptr;
   }
-  space_batch_param->m_ = block_sizes->size();
-  if (((size_t)block_sizes->size()) > COMM_SHAPE_SIZE) {
+  space_batch_param->m_ = static_cast<int>(block_sizes->size());
+  if (block_sizes->size() > COMM_SHAPE_SIZE) {
     MS_LOG(ERROR) << "The value of block_sizes.size() is too big,which cannot be bigger than " << COMM_SHAPE_SIZE;
     free(space_batch_param);
     return nullptr;
   }
-  memcpy(space_batch_param->block_sizes_, (block_sizes->data()), block_sizes->size() * sizeof(int));
+  memcpy(space_batch_param->block_sizes_, block_sizes->data(), block_sizes->size() * sizeof(int));
   auto paddings = space_to_batch_prim->paddings();
   if (paddings == nullptr) {
     MS_LOG(ERROR) << "paddings is nullptr";
     free(space_batch_param);
     return nullptr;
   }
-  if (((size_t)paddings->size()) > COMM_SHAPE_SIZE) {
+  if (paddings->size() > COMM_SHAPE_SIZE) {
     MS_LOG(ERROR) << "The value of paddings.size() is too big,which cannot be bigger than " << COMM_SHAPE_SIZE;
     free(space_batch_param);
     return nullptr;
   }
   memcpy(space_batch_param->paddings_, (paddings->data()), paddings->size() * sizeof(int));
 
-  space_batch_param->m_ = block_sizes->size();
-  for (int i = 0; i < space_batch_param->m_; i++) {
-    space_batch_param->block_sizes_[i] = block_sizes->data()[i];
-  }
-
   return reinterpret_cast<OpParameter *>(space_batch_param);
 }
 } // namespace

@@ -53,7 +53,7 @@ OpParameter *PopulateSplitParameter(const void *prim) {
   auto split_sizes_vector_ = split_prim->sizeSplits();
   if (split_sizes_vector_ != nullptr) {
     int i = 0;
-    for (auto iter = split_sizes_vector_->begin(); iter != split_sizes_vector_->end(); iter++) {
+    for (auto iter = split_sizes_vector_->begin(); iter != split_sizes_vector_->end(); ++iter) {
      split_param->split_sizes_[i++] = *iter;
     }
     split_param->split_count_ = split_param->num_split_;

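Two loops here switch from `iter++` to `++iter`. For class-type iterators, post-increment must construct and return a copy of the iterator's previous state, which pre-increment avoids; an optimizer usually erases the difference, but the pre-increment form is the conventional lint-clean spelling. Sketch:

```cpp
#include <vector>

int Sum(const std::vector<int> &vals) {
  int total = 0;
  // ++it increments in place; it++ would also create a temporary copy.
  for (auto it = vals.begin(); it != vals.end(); ++it) {
    total += *it;
  }
  return total;
}
```
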
@@ -47,9 +47,9 @@ OpParameter *PopulateUnsqueezeParameter(const void *prim) {
     free(unsqueeze_param);
     return nullptr;
   }
-  unsqueeze_param->num_dim_ = flat_axis->size();
+  unsqueeze_param->num_dim_ = static_cast<int>(flat_axis->size());
   int i = 0;
-  for (auto iter = flat_axis->begin(); iter != flat_axis->end(); iter++) {
+  for (auto iter = flat_axis->begin(); iter != flat_axis->end(); ++iter) {
     unsqueeze_param->dims_[i++] = *iter;
   }
   return reinterpret_cast<OpParameter *>(unsqueeze_param);

@@ -16,15 +16,11 @@
 
 #include "schema/model_v0_generated.h"
 #include "src/ops/populate/populate_register.h"
+#include "nnacl/while_parameter.h"
 
 namespace mindspore {
 namespace lite {
 namespace {
-struct WhileParemeter {
-  OpParameter op_parameter_;
-  int body_subgraph_index = 0;
-  int cond_subgraph_index = 0;
-};
-using WhileParemeter = WhileParemeter;
 OpParameter *PopulateWhileParameter(const void *prim) {
   auto *primitive = static_cast<const schema::v0::Primitive *>(prim);

@@ -14,16 +14,11 @@
  * limitations under the License.
  */
 #include "src/ops/populate/populate_register.h"
+#include "nnacl/while_parameter.h"
 using mindspore::schema::PrimitiveType_While;
 
 namespace mindspore {
 namespace lite {
-struct WhileParemeter {
-  OpParameter op_parameter_;
-  int body_subgraph_index = 0;
-  int cond_subgraph_index = 0;
-};
-using WhileParemeter = WhileParemeter;
 OpParameter *PopulateWhileParemeter(const void *prim) {
   auto primitive = static_cast<const schema::Primitive *>(prim);
   MS_ASSERT(primitive != nullptr);

@@ -19,9 +19,7 @@
 #include "src/common/log_adapter.h"
 
 namespace mindspore {
-std::shared_ptr<Allocator> Allocator::Create() {
-  return std::shared_ptr<Allocator>(new (std::nothrow) DefaultAllocator());
-}
+std::shared_ptr<Allocator> Allocator::Create() { return std::make_shared<DefaultAllocator>(); }
 
 DefaultAllocator::DefaultAllocator(size_t aligned_size) { aligned_size_ = aligned_size; }
 
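One behavioral nuance in the allocator change: `new (std::nothrow)` wrapped in a shared_ptr yields a null pointer when object allocation fails, while `std::make_shared` throws `std::bad_alloc` instead (and fuses the object and its control block into a single allocation). A sketch of the two failure modes, with a hypothetical stand-in type:

```cpp
#include <memory>
#include <new>

struct DefaultAlloc {};  // stand-in for DefaultAllocator

std::shared_ptr<DefaultAlloc> CreateNoThrow() {
  // Holds nullptr if the object allocation fails; callers must check.
  return std::shared_ptr<DefaultAlloc>(new (std::nothrow) DefaultAlloc());
}

std::shared_ptr<DefaultAlloc> CreateThrowing() {
  // Throws std::bad_alloc on failure; one allocation for object + control block.
  return std::make_shared<DefaultAlloc>();
}
```
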
@@ -29,6 +29,9 @@ using mindspore::lite::RET_OK;
 using mindspore::schema::PrimitiveType_Gather;
 
 namespace mindspore::kernel {
+namespace {
+constexpr int kSecondInput = 2;
+}
 GatherFp16CPUKernel::~GatherFp16CPUKernel() {
   if (input_data_) {
     ms_context_->allocator->Free(input_data_);

@@ -44,7 +47,8 @@ int GatherFp16CPUKernel::Init() {
       reinterpret_cast<float16_t *>(ms_context_->allocator->Malloc(input_tensor->ElementsNum() * sizeof(float16_t)));
     Float32ToFloat16(reinterpret_cast<float *>(input_tensor->data_c()), input_data_, input_tensor->ElementsNum());
   }
-  (reinterpret_cast<GatherParameter *>(op_parameter_))->axis_ = *(reinterpret_cast<int *>(in_tensors_.at(2)->data_c()));
+  (reinterpret_cast<GatherParameter *>(op_parameter_))->axis_ =
+    *(reinterpret_cast<int *>(in_tensors_.at(kSecondInput)->data_c()));
   if (!InferShapeDone()) {
     return RET_OK;
   }

@@ -19,6 +19,11 @@
 #include "ops/primitive_c.h"
 
 namespace mindspore::lite {
+namespace {
+constexpr int kBuildInputFlagTwo = 2;
+constexpr int kBuildInputFlagThree = 3;
+constexpr int kBuildInputFlagFour = 4;
+}  // namespace
 STATUS InputAdjust::AddAttrToInput(const FuncGraphPtr &func_graph, const CNodePtr &cnode, int input_num,
                                    const std::string &attr_name, int flag) {
   MS_ASSERT(cnode != nullptr);

@@ -96,10 +101,10 @@ bool InputAdjust::Run(const FuncGraphPtr &func_graph) {
     }
     if (opt::CheckPrimitiveType(node, prim::kPrimTranspose)) {
       MS_LOG(INFO) << "Adjust Transpose";
-      status = AddAttrToInput(func_graph, cnode, opt::kInputIndexTwo, "perm", 2);
+      status = AddAttrToInput(func_graph, cnode, opt::kInputIndexTwo, "perm", kBuildInputFlagTwo);
     } else if (opt::CheckPrimitiveType(node, prim::kPrimReshape)) {
       MS_LOG(INFO) << "Adjust Reshape";
-      status = AddAttrToInput(func_graph, cnode, opt::kInputIndexTwo, "shape", 2);
+      status = AddAttrToInput(func_graph, cnode, opt::kInputIndexTwo, "shape", kBuildInputFlagTwo);
     } else if (opt::CheckPrimitiveType(node, prim::kPrimGather)) {
       MS_LOG(INFO) << "Adjust Gather";
       status = AddAttrToInput(func_graph, cnode, opt::kInputIndexThree, "axis", 1);

@@ -111,16 +116,16 @@ bool InputAdjust::Run(const FuncGraphPtr &func_graph) {
       status = AddAttrToInput(func_graph, cnode, opt::kInputIndexTwo, "k", 1);
     } else if (opt::CheckPrimitiveType(node, prim::kPrimTileFusion)) {
       MS_LOG(INFO) << "Adjust TileFusion";
-      status = AddAttrToInput(func_graph, cnode, opt::kInputIndexTwo, "multiples", 2);
+      status = AddAttrToInput(func_graph, cnode, opt::kInputIndexTwo, "multiples", kBuildInputFlagTwo);
     } else if (opt::CheckPrimitiveType(node, prim::kPrimReduceFusion)) {
       MS_LOG(INFO) << "Adjust ReduceFusion";
-      status = AddAttrToInput(func_graph, cnode, opt::kInputIndexTwo, "axes", 2);
+      status = AddAttrToInput(func_graph, cnode, opt::kInputIndexTwo, "axes", kBuildInputFlagTwo);
     } else if (opt::CheckPrimitiveType(node, prim::kPrimPadFusion)) {
       MS_LOG(INFO) << "Adjust PadFusion";
-      status = AddAttrToInput(func_graph, cnode, opt::kInputIndexTwo, "paddings", 3);
+      status = AddAttrToInput(func_graph, cnode, opt::kInputIndexTwo, "paddings", kBuildInputFlagThree);
     } else if (opt::CheckPrimitiveType(node, prim::kPrimPowFusion)) {
       MS_LOG(INFO) << "Adjust PowFuison";
-      status = AddAttrToInput(func_graph, cnode, opt::kInputIndexTwo, "power", 4);
+      status = AddAttrToInput(func_graph, cnode, opt::kInputIndexTwo, "power", kBuildInputFlagFour);
     } else if (opt::CheckPrimitiveType(node, prim::kPrimResize)) {
       status = AddAttrToInput(func_graph, cnode, opt::kInputIndexTwo, "zoom_factor", 1);
     }

@@ -30,6 +30,7 @@
 namespace mindspore {
 namespace opt {
 namespace {
+constexpr int kInputChannal = 3;
 constexpr size_t INITIAL_SIZE = 1024;
 void FreeTensors(std::vector<lite::Tensor *> *tensors) {
   if (tensors == nullptr) {

@@ -63,7 +64,7 @@ void RectifyFormat(const CNodePtr &cnode, const std::vector<lite::Tensor *> &inp
   }
   for (auto &input : inputs) {
     auto shape = input->shape();
-    if (shape.size() == kInputSizeFour && shape[3] == 3 && shape[1] == -1) {
+    if (shape.size() == kInputSizeFour && shape[kInputIndexThree] == kInputChannal && shape[1] == -1) {
       input->set_format(mindspore::NHWC);
     }
   }

@@ -30,6 +30,7 @@ namespace mindspore {
 namespace opt {
 namespace {
 constexpr size_t kFirstInput = 1;
+constexpr size_t kHalfDivisor = 2;
 constexpr size_t kOnnxStridedSlice = 6;
 const std::vector<int> NH2NC = {0, 3, 1, 2};
 const std::vector<int> NC2NH = {0, 2, 3, 1};

@@ -119,16 +120,16 @@ bool TransposeStrategy::CanFusionIfInsert(const FuncGraphPtr &func_graph, const
     return false;
   }
   auto total_node_count = in_nodes.size() + out_nodes.size();
-  bool can_insert = trans_count > total_node_count / 2;
+  bool can_insert = trans_count > total_node_count / kHalfDivisor;
   if (CheckPrimitiveType(cnode, prim::kPrimActivation)) {
     auto prim_act = GetValueNode<std::shared_ptr<ops::Activation>>(cnode->input(0));
     MS_ASSERT(prim_act != nullptr);
     if (prim_act->get_activation_type() == mindspore::ActivationType::LEAKY_RELU) {
-      can_insert = trans_count >= total_node_count / 2;
+      can_insert = trans_count >= total_node_count / kHalfDivisor;
     }
   }
   if (CheckPrimitiveType(cnode, prim::kPrimSplit) || CheckPrimitiveType(cnode, prim::kPrimQuantDTypeCast)) {
-    can_insert = trans_count >= total_node_count / 2;
+    can_insert = trans_count >= total_node_count / kHalfDivisor;
   }
   if (!can_insert) {
     return can_insert;

@@ -202,7 +203,7 @@ STATUS TransposeStrategy::ChangeOpAxis(const FuncGraphPtr &func_graph, const CNo
   auto new_axis = axis_map[axis < 0 ? axis + kInputSizeFour : axis];
   if (new_axis == 0) {
     offsets = {offsets[0], offsets[kInputIndexTwo], offsets[kInputIndexThree], offsets[1]};
-  } else if (new_axis == 3) {
+  } else if (new_axis == kInputIndexThree) {
     offsets = {offsets[1], offsets[kInputIndexTwo], offsets[0]};
   } else {
     offsets.push_back(0);

@@ -28,6 +28,7 @@ using mindspore::lite::NCHW_SHAPE;
 namespace mindspore {
 namespace opt {
 namespace {
+constexpr int kInputChannel = 3;
 const std::vector<int> NH2NC = {0, 3, 1, 2};
 const std::vector<int> NC2NH = {0, 2, 3, 1};
 
@@ -162,7 +163,7 @@ void ConvertNcTensor2Nh(const FuncGraphPtr &func_graph, const CNodePtr &cnode, s
   } else if (data_info.shape_.size() == kInputSizeTwo) {
     new_shape = {1, 1, data_info.shape_[0], data_info.shape_[1]};
   } else if (data_info.shape_.size() == kInputSizeThree) {
-    new_shape = {1, data_info.shape_[0], data_info.shape_[1], data_info.shape_[2]};
+    new_shape = {1, data_info.shape_[0], data_info.shape_[1], data_info.shape_[kInputIndexTwo]};
   }
   auto size = data_info.data_.size() / sizeof(float);
   std::vector<float> new_data(size);

@@ -185,7 +186,7 @@ void ConvertNcTensor2Nh(const FuncGraphPtr &func_graph, const CNodePtr &cnode, s
   }
   auto param_node = func_graph->add_parameter();
   param_node->set_name(cnode->input(index)->fullname_with_scope());
-  std::vector<int64_t> shape_vec{new_shape[0], new_shape[2], new_shape[3], new_shape[1]};
+  std::vector<int64_t> shape_vec{new_shape[0], new_shape[kInputIndexTwo], new_shape[kInputIndexThree], new_shape[1]};
   auto tensor_info = lite::CreateTensorInfo(new_data.data(), size * sizeof(float), shape_vec, kNumberTypeFloat32);
   if (tensor_info == nullptr) {
     MS_LOG(ERROR) << "Create tensor info failed";

@@ -501,8 +502,8 @@ STATUS UnifyFormatPass::HandleGraphInput(const FuncGraphPtr &func_graph, const C
     if (shape_vector.size() != kInputSizeFour) {
       continue;
     }
-    if (func_graph->get_inputs().size() == 1 && fmk_type_ == lite::converter::FmkType_ONNX && shape_vector[3] == 3 &&
-        shape_vector[1] == -1) {
+    if (func_graph->get_inputs().size() == 1 && fmk_type_ == lite::converter::FmkType_ONNX &&
+        shape_vector[kInputIndexThree] == kInputChannel && shape_vector[1] == -1) {
       continue;
     }
     std::vector<int64_t> new_dims = {shape_vector[NCHW_SHAPE::NCHW_N], shape_vector[NCHW_SHAPE::NCHW_H],