!22453 [LITE] code review

Merge pull request !22453 from yefeng/149-check_op_2
i-robot 2021-08-27 06:13:20 +00:00 committed by Gitee
commit 111d1a9a61
11 changed files with 22 additions and 11 deletions

@@ -65,8 +65,8 @@ int SliceCPUKernel::ReSize() {
 }
 int SliceCPUKernel::Init() {
-  CHECK_LESS_RETURN(in_tensors_, 3);
-  CHECK_LESS_RETURN(out_tensors_, 1);
+  CHECK_LESS_RETURN(in_tensors_.size(), 3);
+  CHECK_LESS_RETURN(out_tensors_.size(), 1);
   if (!InferShapeDone()) {
     return RET_OK;
   }
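Review note: the fix passes the element count instead of the tensor vector itself. A CHECK_LESS_RETURN-style guard compares two numbers, so handing it in_tensors_ (a std::vector) rather than in_tensors_.size() cannot express the intended arity check; a vector does not even compare against an integer. Below is a minimal, self-contained sketch of a macro in this spirit — the names and the exact definition are assumptions, not the actual MindSpore Lite source. The same correction recurs in the fp16 and int8 slice kernels further down.

#include <cstdio>
#include <vector>

// Hypothetical status codes standing in for mindspore::lite::RET_*.
constexpr int RET_OK = 0;
constexpr int RET_ERROR = -1;

// Sketch of a size-guard macro in the spirit of CHECK_LESS_RETURN: bail out
// of the enclosing function when `value` is smaller than `threshold`.
#define CHECK_LESS_RETURN(value, threshold)                         \
  do {                                                              \
    if ((value) < (threshold)) {                                    \
      fprintf(stderr, "%s must be >= %s\n", #value, #threshold);    \
      return RET_ERROR;                                             \
    }                                                               \
  } while (0)

// Mirrors the corrected call sites: guard the tensor counts before Init
// indexes into in_tensors_/out_tensors_. Slice presumably takes an input
// tensor plus begin/size tensors, hence the count of 3.
int Init(const std::vector<int> &in_tensors, const std::vector<int> &out_tensors) {
  CHECK_LESS_RETURN(in_tensors.size(), 3u);
  CHECK_LESS_RETURN(out_tensors.size(), 1u);
  return RET_OK;
}

int main() {
  return Init({0, 1, 2}, {0});  // both guards pass, returns RET_OK
}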

@@ -70,7 +70,6 @@ int TensorListSetItemCPUKernel::Run() {
   int dim0 = output0_->ElementsNum() - 1;
   index_ = reinterpret_cast<int *>(in_tensors_[1]->data_c())[0];
-  CHECK_NULL_RETURN(index_);
   if (index_ < 0 || index_ > dim0) {
     if (IncrementOutputSize(output0_->tensors().size()) != RET_OK) {
       MS_LOG(ERROR) << "Resizeoutput Error ,index tensor:[" << index_ << "] must be in [0, " << dim0 << "]!";
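Review note: the dropped guard applied a null-pointer macro to index_, which is an int loaded from the tensor, not a pointer; against a nullptr-comparing definition the line is ill-formed, and the range check against dim0 that follows is what actually validates the value. A standalone sketch — the macro definition is assumed, not taken from the source:

#include <cstdio>

// Hypothetical stand-ins for mindspore::lite status codes.
constexpr int RET_ERROR = -1;
constexpr int RET_NULL_PTR = -2;

// Assumed shape of a null-guard macro: only meaningful for pointer arguments.
#define CHECK_NULL_RETURN(ptr)                    \
  do {                                            \
    if ((ptr) == nullptr) {                       \
      fprintf(stderr, "%s is nullptr\n", #ptr);   \
      return RET_NULL_PTR;                        \
    }                                             \
  } while (0)

int ReadIndex(const int *data, int dim0) {
  CHECK_NULL_RETURN(data);      // correct use: guard the pointer before reading
  int index = data[0];
  // CHECK_NULL_RETURN(index);  // ill-formed: an int cannot compare to nullptr
  if (index < 0 || index > dim0) {
    return RET_ERROR;           // the range check is the real validation
  }
  return index;
}

int main() {
  int v = 3;
  return ReadIndex(&v, 7) == 3 ? 0 : 1;
}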

@@ -21,6 +21,7 @@
 using mindspore::lite::KernelRegistrar;
 using mindspore::lite::RET_ERROR;
+using mindspore::lite::RET_NULL_PTR;
 using mindspore::lite::RET_OK;
 using mindspore::schema::PrimitiveType_SliceFusion;
@@ -43,8 +44,8 @@ SliceFp16CPUKernel::~SliceFp16CPUKernel() {
 }
 int SliceFp16CPUKernel::Init() {
-  CHECK_LESS_RETURN(in_tensors_, 1);
-  CHECK_LESS_RETURN(out_tensors_, 1);
+  CHECK_LESS_RETURN(in_tensors_.size(), 1);
+  CHECK_LESS_RETURN(out_tensors_.size(), 1);
   auto input_tensor = in_tensors_.at(0);
   if (input_tensor->data_type() == kNumberTypeFloat32 && input_tensor->data_c() != nullptr) {
     input_data_ =

@@ -25,6 +25,7 @@ using mindspore::kernel::KERNEL_ARCH;
 using mindspore::lite::KernelRegistrar;
 using mindspore::lite::RET_ERROR;
 using mindspore::lite::RET_INFER_INVALID;
+using mindspore::lite::RET_NULL_PTR;
 using mindspore::lite::RET_OK;
 using mindspore::schema::Format;
 using mindspore::schema::PrimitiveType_AdderFusion;

@@ -19,6 +19,7 @@
 using mindspore::lite::KernelRegistrar;
 using mindspore::lite::RET_ERROR;
+using mindspore::lite::RET_NULL_PTR;
 using mindspore::lite::RET_OK;
 using mindspore::schema::PrimitiveType_BatchNorm;
@@ -53,7 +54,6 @@ void BatchnormCPUKernel::FillParam() {
   auto input_shapes = in_tensors_.at(0)->shape();
   auto n_dim = input_shapes.size();
   auto param = reinterpret_cast<BatchNormParameter *>(op_parameter_);
-  CHECK_NULL_RETURN(param);
   param->channel_ = input_shapes[n_dim - 1];
   param->unit_ = 1;
   for (size_t i = 0; i < n_dim - 1; i++) {
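Review note: the guard has to leave FillParam because the method returns void, and a macro that expands to `return RET_NULL_PTR;` cannot appear in a void function; presumably the null check on op_parameter_ happens before FillParam is reached. A compact sketch of the constraint, with hypothetical stand-ins:

#include <cassert>

constexpr int RET_NULL_PTR = -2;  // hypothetical stand-in
#define CHECK_NULL_RETURN(ptr) \
  do { if ((ptr) == nullptr) return RET_NULL_PTR; } while (0)

struct BatchNormParameter { int channel_; int unit_; };

void FillParam(BatchNormParameter *param) {
  // CHECK_NULL_RETURN(param);  // would not compile here: a void function
  //                            // cannot `return RET_NULL_PTR;`
  assert(param != nullptr);     // an assert-style check still fits
  param->channel_ = 8;
  param->unit_ = 1;
}

int main() {
  BatchNormParameter p{};
  FillParam(&p);
  return p.unit_ == 1 ? 0 : 1;
}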

@@ -24,6 +24,7 @@
 using mindspore::kernel::KERNEL_ARCH;
 using mindspore::lite::KernelRegistrar;
 using mindspore::lite::RET_ERROR;
+using mindspore::lite::RET_NULL_PTR;
 using mindspore::lite::RET_OK;
 using mindspore::schema::PrimitiveType_BatchNorm;
 using mindspore::schema::PrimitiveType_FusedBatchNorm;

@@ -22,13 +22,14 @@
 using mindspore::lite::KernelRegistrar;
 using mindspore::lite::RET_ERROR;
+using mindspore::lite::RET_NULL_PTR;
 using mindspore::lite::RET_OK;
 using mindspore::schema::PrimitiveType_SliceFusion;
 namespace mindspore::kernel {
 int SliceInt8CPUKernel::Init() {
-  CHECK_LESS_RETURN(in_tensors_, 1);
-  CHECK_LESS_RETURN(out_tensors_, 1);
+  CHECK_LESS_RETURN(in_tensors_.size(), 1);
+  CHECK_LESS_RETURN(out_tensors_.size(), 1);
   auto input = in_tensors_.at(0);
   auto output = out_tensors_.at(0);
   MS_ASSERT(input != nullptr);

@@ -19,6 +19,7 @@
 using mindspore::lite::KernelRegistrar;
 using mindspore::lite::RET_ERROR;
+using mindspore::lite::RET_NULL_PTR;
 using mindspore::lite::RET_OK;
 using mindspore::schema::PrimitiveType_CustomExtractFeatures;

@@ -22,6 +22,7 @@
 using mindspore::kernel::KERNEL_ARCH;
 using mindspore::lite::KernelRegistrar;
 using mindspore::lite::RET_ERROR;
+using mindspore::lite::RET_NULL_PTR;
 using mindspore::lite::RET_OK;
 using mindspore::schema::PrimitiveType_LshProjection;

@@ -21,6 +21,7 @@
 using mindspore::lite::KernelRegistrar;
 using mindspore::lite::RET_ERROR;
+using mindspore::lite::RET_NULL_PTR;
 using mindspore::lite::RET_OK;
 using mindspore::lite::StringPack;
 using mindspore::schema::PrimitiveType_SkipGram;
@@ -38,7 +39,7 @@ int SkipGramCPUKernel::Init() {
 int SkipGramCPUKernel::ReSize() { return RET_OK; }
 void ParseSentenceToWords(const StringPack &sentence, std::vector<StringPack> *words) {
-  CHECK_NULL_RETURN(words);
+  MS_ASSERT(words != nullptr);
   int pre = 0;
   int i;
   for (i = 0; i < sentence.len; i++) {
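Review note: same constraint as in FillParam above. ParseSentenceToWords returns void, so the status-returning CHECK_NULL_RETURN gives way to MS_ASSERT, which validates the pointer (in debug builds) without needing to return an error code.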
@@ -78,7 +79,7 @@ int SkipGramCPUKernel::Run() {
   int index = 1;
   int size = words.size();
-  CHECK_LESS_RETURN(stack.size(), index);
+  CHECK_LESS_RETURN(static_cast<int>(stack.size()), index);
   while (index >= 0) {
     if (index < skip_gram_parameter_->ngram_size && stack.at(index) + 1 < size &&
         (index == 0 || stack.at(index) - stack.at(index - 1) <= skip_gram_parameter_->max_skip_size)) {
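Review note: the cast matters because stack.size() is an unsigned size_t while index is a signed int. In a mixed comparison the int is converted to unsigned, so a negative index becomes a huge value and the guard misfires; compilers flag the pattern with -Wsign-compare. A standalone demonstration:

#include <iostream>
#include <vector>

int main() {
  std::vector<int> stack = {1, 2, 3};
  int index = -1;
  // Mixed signed/unsigned: index converts to a huge size_t, so the guard
  // wrongly reports the stack as "too small" for a negative index.
  std::cout << (stack.size() < index) << '\n';                    // prints 1
  // After the cast the comparison stays in signed arithmetic and behaves
  // sensibly for every value of index.
  std::cout << (static_cast<int>(stack.size()) < index) << '\n';  // prints 0
  return 0;
}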

@@ -163,7 +163,10 @@ int BatchNormOpenCLKernel::Initweight() {
   memset(offset_, 0x00, weight_size);
   memset(mean_, 0x00, weight_size);
   memset(variance_, 0x00, weight_size);
+  CHECK_NULL_RETURN(in_tensors_.at(1)->data_c());
+  CHECK_NULL_RETURN(in_tensors_.at(2)->data_c());
+  CHECK_NULL_RETURN(in_tensors_.at(3)->data_c());
+  CHECK_NULL_RETURN(in_tensors_.at(4)->data_c());
   if (weight_tensor->data_type() == kNumberTypeFloat16) {
     if (use_fp16_enable_) {
       memcpy(scale_, in_tensors_.at(1)->data_c(), weight_size);
@@ -251,6 +254,8 @@ int BatchNormOpenCLKernel::Prepare() {
 }
 int BatchNormOpenCLKernel::Run() {
+  CHECK_NULL_RETURN(in_tensors_.at(0)->data_c());
+  CHECK_NULL_RETURN(out_tensors_.at(0)->data_c());
   MS_LOG(DEBUG) << this->name() << " Running! ";
   int arg_cn = 0;
   if (ocl_runtime_->SetKernelArg(kernel_, arg_cn++, in_tensors_.at(0)->data_c()) != CL_SUCCESS) {
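Review note: the added guards all follow one pattern: confirm each tensor's backing buffer (data_c()) is non-null before it reaches memcpy or SetKernelArg, failing fast with a status code instead of crashing. A self-contained sketch of the pattern; the Tensor type and the macro here are hypothetical stand-ins for the real API:

#include <cstring>
#include <vector>

constexpr int RET_OK = 0;
constexpr int RET_NULL_PTR = -2;  // hypothetical stand-in

#define CHECK_NULL_RETURN(ptr) \
  do { if ((ptr) == nullptr) return RET_NULL_PTR; } while (0)

// Hypothetical minimal tensor: data_c() is null until a buffer is attached.
struct Tensor {
  std::vector<char> buf;
  void *data_c() { return buf.empty() ? nullptr : buf.data(); }
};

int InitWeight(std::vector<Tensor> &in_tensors, char *scale, size_t weight_size) {
  // Guard every weight input (scale/offset/mean/variance in the real kernel)
  // before any memcpy touches it.
  for (size_t i = 1; i < in_tensors.size(); ++i) {
    CHECK_NULL_RETURN(in_tensors.at(i).data_c());
  }
  std::memcpy(scale, in_tensors.at(1).data_c(), weight_size);
  return RET_OK;
}

int main() {
  std::vector<Tensor> tensors(5);
  for (auto &t : tensors) t.buf.resize(16);
  char scale[16];
  return InitWeight(tensors, scale, sizeof(scale)) == RET_OK ? 0 : 1;
}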