fix micro copyright && namespace

This commit is contained in:
z00512249 2021-02-07 10:31:18 +08:00
parent 9b7a3c0ec2
commit 1feca36a45
78 changed files with 171 additions and 171 deletions

View File

@ -18,7 +18,7 @@
static const char micro_tensor_h[] =
"/**\n"
" * Copyright 2019 Huawei Technologies Co., Ltd\n"
" * Copyright 2021 Huawei Technologies Co., Ltd\n"
" *\n"
" * Licensed under the Apache License, Version 2.0 (the \"License\");\n"
" * you may not use this file except in compliance with the License.\n"

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -22,7 +22,7 @@
using mindspore::schema::PrimitiveType_Activation;
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
int ActivationFP32Coder::DoCode(CoderContext *const context) {
// attribute
@ -68,4 +68,4 @@ int ActivationFP32Coder::DoCode(CoderContext *const context) {
}
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Activation, CPUOpCoderCreator<ActivationFP32Coder>)
} // namespace mindspore::lite::micro
} // namespace mindspore::lite::micro::nnacl

View File

@ -20,7 +20,7 @@
#include <vector>
#include "micro/coder/opcoders/op_coder.h"
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
class ActivationFP32Coder final : public OperatorCoder {
public:
@ -35,6 +35,6 @@ class ActivationFP32Coder final : public OperatorCoder {
int DoCode(CoderContext *const context) override;
};
} // namespace mindspore::lite::micro
} // namespace mindspore::lite::micro::nnacl
#endif // MICRO_CODER_OPCODERS_FP32__CODER_H_

View File

@ -20,7 +20,7 @@
#include "micro/coder/opcoders/file_collector.h"
using mindspore::schema::PrimitiveType_AddN;
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
int AddNFP32Coder::DoCode(CoderContext *const context) {
Tensor *input0 = input_tensors_.at(kInputIndex);
@ -45,4 +45,4 @@ int AddNFP32Coder::DoCode(CoderContext *const context) {
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_AddN, CPUOpCoderCreator<AddNFP32Coder>)
} // namespace mindspore::lite::micro
} // namespace mindspore::lite::micro::nnacl

View File

@ -14,12 +14,12 @@
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_ADDN_FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_ADDN_FP32_CODER_H_
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_ADDN_FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_ADDN_FP32_CODER_H_
#include <vector>
#include "micro/coder/opcoders/op_coder.h"
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
class AddNFP32Coder : public OperatorCoder {
public:
AddNFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
@ -32,5 +32,5 @@ class AddNFP32Coder : public OperatorCoder {
int DoCode(CoderContext *const context) override;
};
} // namespace mindspore::lite::micro
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_ADDN_FP32_CODER_H_
} // namespace mindspore::lite::micro::nnacl
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_ADDN_FP32_CODER_H_

View File

@ -21,7 +21,7 @@
#include "nnacl/fp32/arithmetic_fp32.h"
#include "micro/coder/log.h"
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
int ArithmeticFP32Coder::Init(CoderContext *const context) {
filter_tensor_ = input_tensors_.at(kWeightIndex);
@ -370,4 +370,4 @@ REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_GreaterEqual,
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Eltwise, CPUOpCoderCreator<ArithmeticFP32Coder>)
} // namespace mindspore::lite::micro
} // namespace mindspore::lite::micro::nnacl

View File

@ -23,7 +23,7 @@
#include "nnacl/fp32/arithmetic_fp32.h"
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
#define DEFAULT_ARITHMETIC_NDIMS 10
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
using mindspore::schema::PrimitiveType_Add;
@ -105,5 +105,5 @@ class ArithmeticFP32Coder final : public OperatorCoder {
LiteDataType data_type_{kDataTypeFloat};
};
} // namespace mindspore::lite::micro
} // namespace mindspore::lite::micro::nnacl
#endif // MICRO_CODER_OPCODERS_FP32_ARITHMETIC_FP32_CODER_H_

View File

@ -21,7 +21,7 @@
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
#include "micro/coder/opcoders/file_collector.h"
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
int ArithmeticSelfFP32Coder::ReSize() {
data_size_ = input_tensor_->ElementsNum();
@ -101,4 +101,4 @@ REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Round, CPUOpCo
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Neg, CPUOpCoderCreator<ArithmeticSelfFP32Coder>)
} // namespace mindspore::lite::micro
} // namespace mindspore::lite::micro::nnacl

View File

@ -23,7 +23,7 @@
#include "nnacl/fp32/arithmetic_self_fp32.h"
#include "nnacl/arithmetic_self_parameter.h"
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
using mindspore::schema::PrimitiveType_Abs;
@ -105,5 +105,5 @@ class ArithmeticSelfFP32Coder final : public OperatorCoder {
size_t data_size_{0};
std::string arithmetic_self_run_;
};
} // namespace mindspore::lite::micro
} // namespace mindspore::lite::micro::nnacl
#endif // MICRO_CODER_OPCODERS_FP32_ARITHMETIC_SELF_FP32_CODER_H_

View File

@ -19,7 +19,7 @@
#include "schema/inner/ops_generated.h"
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
using mindspore::schema::PrimitiveType_AssignAdd;
@ -52,4 +52,4 @@ REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_AssignAdd, CPU
REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt32, PrimitiveType_AssignAdd, CPUOpCoderCreator<AssignAddFP32Coder>)
} // namespace mindspore::lite::micro
} // namespace mindspore::lite::micro::nnacl

View File

@ -14,14 +14,14 @@
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_ASSIGN_ADD_FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_ASSIGN_ADD_FP32_CODER_H_
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_ASSIGN_ADD_FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_ASSIGN_ADD_FP32_CODER_H_
#include <vector>
#include "micro/coder/opcoders/op_coder.h"
#include "nnacl/base/tile_base.h"
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
class AssignAddFP32Coder : public OperatorCoder {
public:
AssignAddFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
@ -33,5 +33,5 @@ class AssignAddFP32Coder : public OperatorCoder {
int DoCode(CoderContext *const context) override;
};
} // namespace mindspore::lite::micro
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_ASSIGN_ADD_FP32_CODER_H_
} // namespace mindspore::lite::micro::nnacl
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_ASSIGN_ADD_FP32_CODER_H_

View File

@ -24,7 +24,7 @@
using mindspore::schema::PrimitiveType_BatchNorm;
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
int BatchnormFP32Coder::Init() {
auto bn_parameter = reinterpret_cast<BatchNormParameter *>(parameter_);
@ -66,4 +66,4 @@ int BatchnormFP32Coder::DoCode(CoderContext *const context) {
}
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_BatchNorm, CPUOpCoderCreator<BatchnormFP32Coder>)
} // namespace mindspore::lite::micro
} // namespace mindspore::lite::micro::nnacl

View File

@ -20,7 +20,7 @@
#include <vector>
#include "micro/coder/opcoders/op_coder.h"
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
class BatchnormFP32Coder final : public OperatorCoder {
public:
@ -38,6 +38,6 @@ class BatchnormFP32Coder final : public OperatorCoder {
int Init();
};
} // namespace mindspore::lite::micro
} // namespace mindspore::lite::micro::nnacl
#endif // MICRO_CODER_OPCODERS_FP32_CODER_H_

View File

@ -21,7 +21,7 @@
using mindspore::schema::PrimitiveType_Concat;
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
int ConcatFP32Coder::Prepare(CoderContext *const context) {
concat_param_ = reinterpret_cast<ConcatParameter *>(parameter_);
@ -74,4 +74,4 @@ int ConcatFP32Coder::DoCode(CoderContext *const context) {
}
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Concat, CPUOpCoderCreator<ConcatFP32Coder>)
} // namespace mindspore::lite::micro
} // namespace mindspore::lite::micro::nnacl

View File

@ -14,14 +14,14 @@
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_CONCAT_FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_CONCAT_FP32_CODER_H_
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_CONCAT_FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_CONCAT_FP32_CODER_H_
#include <vector>
#include "micro/coder/opcoders/op_coder.h"
#include "nnacl/concat_parameter.h"
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
class ConcatFP32Coder : public OperatorCoder {
public:
ConcatFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
@ -38,5 +38,5 @@ class ConcatFP32Coder : public OperatorCoder {
int axis_{0};
ConcatParameter *concat_param_{nullptr};
};
} // namespace mindspore::lite::micro
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_CONCAT_FP32_CODER_H_
} // namespace mindspore::lite::micro::nnacl
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_CONCAT_FP32_CODER_H_

View File

@ -21,7 +21,7 @@
using mindspore::schema::PrimitiveType_ExpandDims;
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
int ExpandDimsFP32Coder::Prepare(CoderContext *const context) { return ReSize(); }
int ExpandDimsFP32Coder::ReSize() {
@ -49,4 +49,4 @@ int ExpandDimsFP32Coder::DoCode(CoderContext *const context) {
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_ExpandDims, CPUOpCoderCreator<ExpandDimsFP32Coder>)
REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt32, PrimitiveType_ExpandDims, CPUOpCoderCreator<ExpandDimsFP32Coder>)
} // namespace mindspore::lite::micro
} // namespace mindspore::lite::micro::nnacl

View File

@ -14,13 +14,13 @@
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_EXPANDDIMS_FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_EXPANDDIMS_FP32_CODER_H_
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_EXPANDDIMS_FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_EXPANDDIMS_FP32_CODER_H_
#include <vector>
#include "micro/coder/opcoders/op_coder.h"
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
class ExpandDimsFP32Coder : public OperatorCoder {
public:
ExpandDimsFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
@ -38,5 +38,5 @@ class ExpandDimsFP32Coder : public OperatorCoder {
int thread_sz_stride_{0};
size_t data_size_{0};
};
} // namespace mindspore::lite::micro
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_EXPANDDIMS_FP32_CODER_H_
} // namespace mindspore::lite::micro::nnacl
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_EXPANDDIMS_FP32_CODER_H_

View File

@ -23,7 +23,7 @@
using mindspore::schema::PrimitiveType_Gather;
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
int GatherFP32Coder::Prepare(CoderContext *const context) { return RET_OK; }
@ -66,4 +66,4 @@ int GatherFP32Coder::DoCode(CoderContext *context) {
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Gather, CPUOpCoderCreator<GatherFP32Coder>)
} // namespace mindspore::lite::micro
} // namespace mindspore::lite::micro::nnacl

View File

@ -14,14 +14,14 @@
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_GATHER_FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_GATHER_FP32_CODER_H_
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_GATHER_FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_GATHER_FP32_CODER_H_
#include <vector>
#include "micro/coder/opcoders/op_coder.h"
#include "nnacl/base/tile_base.h"
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
class GatherFP32Coder : public OperatorCoder {
public:
GatherFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
@ -37,5 +37,5 @@ class GatherFP32Coder : public OperatorCoder {
private:
int32_t *indices_{nullptr};
};
} // namespace mindspore::lite::micro
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_GATHER_FP32_CODER_H_
} // namespace mindspore::lite::micro::nnacl
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_GATHER_FP32_CODER_H_

View File

@ -22,7 +22,7 @@
using mindspore::schema::PrimitiveType_Nchw2Nhwc;
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
int Nchw2NhwcFP32Coder::Prepare(CoderContext *const context) { return RET_OK; }
int Nchw2NhwcFP32Coder::DoCode(CoderContext *context) {
@ -48,4 +48,4 @@ int Nchw2NhwcFP32Coder::DoCode(CoderContext *context) {
}
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Nchw2Nhwc, CPUOpCoderCreator<Nchw2NhwcFP32Coder>)
} // namespace mindspore::lite::micro
} // namespace mindspore::lite::micro::nnacl

View File

@ -14,15 +14,15 @@
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_NCHW2FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_NCHW2FP32_CODER_H_
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_NCHW2FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_NCHW2FP32_CODER_H_
#include <string>
#include <vector>
#include "micro/coder/opcoders/op_coder.h"
#include "nnacl/base/tile_base.h"
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
class Nchw2NhwcFP32Coder : public OperatorCoder {
public:
Nchw2NhwcFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
@ -34,5 +34,5 @@ class Nchw2NhwcFP32Coder : public OperatorCoder {
int DoCode(CoderContext *const context) override;
};
} // namespace mindspore::lite::micro
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_NCHW2FP32_CODER_H_
} // namespace mindspore::lite::micro::nnacl
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_NCHW2FP32_CODER_H_

View File

@ -20,7 +20,7 @@
#include "micro/coder/opcoders/file_collector.h"
using mindspore::schema::PrimitiveType_Nhwc2Nchw;
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
int Nhwc2NchwFP32Coder::Prepare(CoderContext *const context) { return RET_OK; }
int Nhwc2NchwFP32Coder::DoCode(CoderContext *const context) {
@ -47,4 +47,4 @@ int Nhwc2NchwFP32Coder::DoCode(CoderContext *const context) {
}
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Nhwc2Nchw, CPUOpCoderCreator<Nhwc2NchwFP32Coder>)
} // namespace mindspore::lite::micro
} // namespace mindspore::lite::micro::nnacl

View File

@ -14,14 +14,14 @@
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_NHWC2NCHW_FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_NHWC2NCHW_FP32_CODER_H_
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_NHWC2NCHW_FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_NHWC2NCHW_FP32_CODER_H_
#include <vector>
#include "micro/coder/opcoders/op_coder.h"
#include "nnacl/base/tile_base.h"
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
class Nhwc2NchwFP32Coder : public OperatorCoder {
public:
Nhwc2NchwFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
@ -33,5 +33,5 @@ class Nhwc2NchwFP32Coder : public OperatorCoder {
int DoCode(CoderContext *const context) override;
};
} // namespace mindspore::lite::micro
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_NHWC2NCHW_FP32_CODER_H_
} // namespace mindspore::lite::micro::nnacl
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_NHWC2NCHW_FP32_CODER_H_

View File

@ -23,7 +23,7 @@
using mindspore::schema::PrimitiveType_Pad;
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
int PadFP32Coder::Prepare(CoderContext *const context) {
pad_param_ = reinterpret_cast<PadParameter *>(parameter_);
@ -100,4 +100,4 @@ int PadFP32Coder::DoCode(CoderContext *const context) {
}
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Pad, CPUOpCoderCreator<PadFP32Coder>)
} // namespace mindspore::lite::micro
} // namespace mindspore::lite::micro::nnacl

View File

@ -14,14 +14,14 @@
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_PAD_FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_PAD_FP32_CODER_H_
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_PAD_FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_PAD_FP32_CODER_H_
#include <vector>
#include "micro/coder/opcoders/op_coder.h"
#include "nnacl/fp32/pad_fp32.h"
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
class PadFP32Coder : public OperatorCoder {
public:
PadFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
@ -45,5 +45,5 @@ class PadFP32Coder : public OperatorCoder {
int in_[DEFAULT_PAD_NDIMS]{0};
int out_[DEFAULT_PAD_NDIMS]{0};
};
} // namespace mindspore::lite::micro
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_PAD_FP32_CODER_H_
} // namespace mindspore::lite::micro::nnacl
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_PAD_FP32_CODER_H_

View File

@ -23,7 +23,7 @@
using mindspore::schema::PrimitiveType_Pooling;
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
int PoolingFP32Coder::DoCode(CoderContext *const context) {
// attribute
@ -100,4 +100,4 @@ int PoolingFP32Coder::DoCode(CoderContext *const context) {
}
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Pooling, CPUOpCoderCreator<PoolingFP32Coder>)
} // namespace mindspore::lite::micro
} // namespace mindspore::lite::micro::nnacl

View File

@ -20,7 +20,7 @@
#include <vector>
#include "micro/coder/opcoders/op_coder.h"
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
class PoolingFP32Coder final : public OperatorCoder {
public:
@ -34,6 +34,6 @@ class PoolingFP32Coder final : public OperatorCoder {
int DoCode(CoderContext *const context) override;
};
} // namespace mindspore::lite::micro
} // namespace mindspore::lite::micro::nnacl
#endif // MICRO_CODER_OPCODERS_FP32__CODER_H_

View File

@ -22,7 +22,7 @@
using mindspore::schema::PrimitiveType_Power;
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
int PowerFP32Coder::DoCode(CoderContext *const context) {
scale_ = reinterpret_cast<PowerParameter *>(parameter_)->scale_;
@ -57,4 +57,4 @@ int PowerFP32Coder::DoCode(CoderContext *const context) {
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Power, CPUOpCoderCreator<PowerFP32Coder>)
} // namespace mindspore::lite::micro
} // namespace mindspore::lite::micro::nnacl

View File

@ -14,14 +14,14 @@
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_POWER_FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_POWER_FP32_CODER_H_
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_POWER_FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_POWER_FP32_CODER_H_
#include <vector>
#include "micro/coder/opcoders/op_coder.h"
#include "nnacl/power_parameter.h"
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
class PowerFP32Coder : public OperatorCoder {
public:
PowerFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
@ -38,5 +38,5 @@ class PowerFP32Coder : public OperatorCoder {
float scale_{0.0f};
float shift_{0.0f};
};
} // namespace mindspore::lite::micro
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_POWER_FP32_CODER_H_
} // namespace mindspore::lite::micro::nnacl
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_POWER_FP32_CODER_H_

View File

@ -20,7 +20,7 @@
#include "micro/coder/opcoders/file_collector.h"
using mindspore::schema::PrimitiveType_Reshape;
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
int ReshapeFP32Coder::DoCode(CoderContext *const context) {
size_t data_size = input_tensor_->Size();
@ -36,4 +36,4 @@ int ReshapeFP32Coder::DoCode(CoderContext *const context) {
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Reshape, CPUOpCoderCreator<ReshapeFP32Coder>)
REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt32, PrimitiveType_Reshape, CPUOpCoderCreator<ReshapeFP32Coder>)
} // namespace mindspore::lite::micro
} // namespace mindspore::lite::micro::nnacl

View File

@ -14,13 +14,13 @@
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_RESHAPE_FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_RESHAPE_FP32_CODER_H_
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_RESHAPE_FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_RESHAPE_FP32_CODER_H_
#include <vector>
#include "micro/coder/opcoders/op_coder.h"
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
class ReshapeFP32Coder : public OperatorCoder {
public:
ReshapeFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
@ -31,5 +31,5 @@ class ReshapeFP32Coder : public OperatorCoder {
int DoCode(CoderContext *const context) override;
};
} // namespace mindspore::lite::micro
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_RESHAPE_FP32_CODER_H_
} // namespace mindspore::lite::micro::nnacl
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_RESHAPE_FP32_CODER_H_

View File

@ -21,7 +21,7 @@
using mindspore::schema::PrimitiveType_Scale;
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
ScaleFP32Coder::~ScaleFP32Coder() {
if (scale_param_->const_scale_) {
if (scale_) {
@ -161,4 +161,4 @@ int ScaleFP32Coder::DoCode(CoderContext *const context) {
}
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Scale, CPUOpCoderCreator<ScaleFP32Coder>)
} // namespace mindspore::lite::micro
} // namespace mindspore::lite::micro::nnacl

View File

@ -21,7 +21,7 @@
#include "micro/coder/opcoders/op_coder.h"
#include "nnacl/scale.h"
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
class ScaleFP32Coder final : public OperatorCoder {
public:
@ -44,6 +44,6 @@ class ScaleFP32Coder final : public OperatorCoder {
float *offset_{nullptr};
};
} // namespace mindspore::lite::micro
} // namespace mindspore::lite::micro::nnacl
#endif // MICRO_CODER_OPCODERS_FP32__CODER_H_

View File

@ -22,7 +22,7 @@
#include "micro/coder/opcoders/file_collector.h"
using mindspore::schema::PrimitiveType_Slice;
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
int SliceFP32Coder::Prepare(CoderContext *const context) { return RET_OK; }
int SliceFP32Coder::DoCode(CoderContext *const context) {
@ -71,4 +71,4 @@ int SliceFP32Coder::DoCode(CoderContext *const context) {
}
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Slice, CPUOpCoderCreator<SliceFP32Coder>)
} // namespace mindspore::lite::micro
} // namespace mindspore::lite::micro::nnacl

View File

@ -14,13 +14,13 @@
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_SLICE_FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_SLICE_FP32_CODER_H_
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_SLICE_FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_SLICE_FP32_CODER_H_
#include <vector>
#include "micro/coder/opcoders/op_coder.h"
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
class SliceFP32Coder : public OperatorCoder {
public:
SliceFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
@ -33,5 +33,5 @@ class SliceFP32Coder : public OperatorCoder {
int DoCode(CoderContext *const context) override;
};
} // namespace mindspore::lite::micro
} // namespace mindspore::lite::micro::nnacl
#endif  // MINDSPORE_LITE_MICRO_CODER_OPCODERS_SLICE_FP32_CODER_H_

View File

@ -21,7 +21,7 @@
using mindspore::schema::PrimitiveType_Squeeze;
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
int SqueezeFP32Coder::DoCode(CoderContext *const context) {
size_t data_size = input_tensor_->Size();
@ -42,4 +42,4 @@ int SqueezeFP32Coder::DoCode(CoderContext *const context) {
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Squeeze, CPUOpCoderCreator<SqueezeFP32Coder>)
} // namespace mindspore::lite::micro
} // namespace mindspore::lite::micro::nnacl

View File

@ -14,13 +14,13 @@
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_SQUEEZE_FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_SQUEEZE_FP32_CODER_H_
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_SQUEEZE_FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_SQUEEZE_FP32_CODER_H_
#include <vector>
#include "micro/coder/opcoders/op_coder.h"
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
class SqueezeFP32Coder : public OperatorCoder {
public:
SqueezeFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
@ -33,5 +33,5 @@ class SqueezeFP32Coder : public OperatorCoder {
int DoCode(CoderContext *const context) override;
};
} // namespace mindspore::lite::micro
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_SQUEEZE_FP32_CODER_H_
} // namespace mindspore::lite::micro::nnacl
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_SQUEEZE_FP32_CODER_H_

View File

@ -22,7 +22,7 @@
using mindspore::schema::PrimitiveType_Tile;
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
void TileFP32Coder::ComputeStrides(const int *shape, int *strides, int ndim) const {
int stride = 1;
for (int i = ndim - 1; i >= 0; i--) {
@ -65,4 +65,4 @@ int TileFP32Coder::DoCode(CoderContext *const context) {
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Tile, CPUOpCoderCreator<TileFP32Coder>)
} // namespace mindspore::lite::micro
} // namespace mindspore::lite::micro::nnacl

View File

@ -14,14 +14,14 @@
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_TILE_FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_TILE_FP32_CODER_H_
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_TILE_FP32_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_TILE_FP32_CODER_H_
#include <vector>
#include "micro/coder/opcoders/op_coder.h"
#include "nnacl/base/tile_base.h"
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
class TileFP32Coder : public OperatorCoder {
public:
TileFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
@ -39,5 +39,5 @@ class TileFP32Coder : public OperatorCoder {
TileParameter *tile_param_{nullptr};
};
} // namespace mindspore::lite::micro
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_TILE_FP32_CODER_H_
} // namespace mindspore::lite::micro::nnacl
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_TILE_FP32_CODER_H_

View File

@ -21,7 +21,7 @@
#include "micro/coder/opcoders/file_collector.h"
using mindspore::schema::PrimitiveType_Transpose;
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
int TransposeFp32Coder::Resize() {
num_unit_ = static_cast<int>(input_tensor_->shape().at(transpose_parameter_->perm_[kNHWC_H]));
@ -91,4 +91,4 @@ int TransposeFp32Coder::DoCode(CoderContext *const context) {
}
REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Transpose, CPUOpCoderCreator<TransposeFp32Coder>)
} // namespace mindspore::lite::micro
} // namespace mindspore::lite::micro::nnacl

View File

@ -19,7 +19,7 @@
#include <vector>
#include "micro/coder/opcoders/op_coder.h"
#include "nnacl/transpose.h"
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
class TransposeFp32Coder final : public OperatorCoder {
public:
@ -49,5 +49,5 @@ class TransposeFp32Coder final : public OperatorCoder {
int *position_ = nullptr;
};
} // namespace mindspore::lite::micro
} // namespace mindspore::lite::micro::nnacl
#endif // MICRO_LITE_MICRO_CODER_OPCODERS_NNACL_FP32_TRANSPOSE_FP32_CODER_H_

View File

@ -31,7 +31,7 @@ int MallocQuantArgForConcat(ConcatQuantArg *quant_arg, size_t input_num) {
using mindspore::schema::PrimitiveType_Concat;
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
int ConcatInt8Coder::Prepare(CoderContext *const context) {
this->concat_param_ = reinterpret_cast<ConcatParameter *>(parameter_);
@ -111,4 +111,4 @@ int ConcatInt8Coder::DoCode(CoderContext *const context) {
}
REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt8, PrimitiveType_Concat, CPUOpCoderCreator<ConcatInt8Coder>)
} // namespace mindspore::lite::micro
} // namespace mindspore::lite::micro::nnacl

View File

@ -14,15 +14,15 @@
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_CONCAT_INT8_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_CONCAT_INT8_CODER_H_
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_CONCAT_INT8_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_CONCAT_INT8_CODER_H_
#include <cstring>
#include <vector>
#include "micro/coder/opcoders/op_coder.h"
#include "nnacl/int8/concat_int8.h"
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
class ConcatInt8Coder : public OperatorCoder {
public:
ConcatInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
@ -52,5 +52,5 @@ class ConcatInt8Coder : public OperatorCoder {
int8_t *input_data_{nullptr};
int axis_ = 0;
};
} // namespace mindspore::lite::micro
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_CONCAT_INT8_CODER_H_
} // namespace mindspore::lite::micro::nnacl
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_CONCAT_INT8_CODER_H_

View File

@ -25,7 +25,7 @@
using std::string;
using mindspore::schema::PrimitiveType_Pooling;
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
int PoolingInt8Coder::DoCode(CoderContext *const context) {
// attribute
@ -79,4 +79,4 @@ int PoolingInt8Coder::DoCode(CoderContext *const context) {
}
REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt8, PrimitiveType_Pooling, CPUOpCoderCreator<PoolingInt8Coder>)
} // namespace mindspore::lite::micro
} // namespace mindspore::lite::micro::nnacl

View File

@ -14,15 +14,15 @@
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_POOLING_INT8_CODER_H
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_POOLING_INT8_CODER_H
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_POOLING_INT8_CODER_H
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_POOLING_INT8_CODER_H
#include <string>
#include <memory>
#include <vector>
#include "micro/coder/opcoders/op_coder.h"
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
class PoolingInt8Coder final : public OperatorCoder {
public:
@ -37,6 +37,6 @@ class PoolingInt8Coder final : public OperatorCoder {
int DoCode(CoderContext *const context) override;
};
} // namespace mindspore::lite::micro
} // namespace mindspore::lite::micro::nnacl
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_POOLING_INT8_CODER_H
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_POOLING_INT8_CODER_H

View File

@ -23,7 +23,7 @@
using mindspore::schema::PrimitiveType_Reshape;
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
int ReshapeInt8Coder::DoCode(CoderContext *const context) {
Tensor *input = OperatorCoder::input_tensors().at(kInputIndex);
@ -55,4 +55,4 @@ int ReshapeInt8Coder::DoCode(CoderContext *const context) {
}
REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt8, PrimitiveType_Reshape, CPUOpCoderCreator<ReshapeInt8Coder>)
} // namespace mindspore::lite::micro
} // namespace mindspore::lite::micro::nnacl

View File

@ -14,13 +14,13 @@
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_RESHAPE_INT8_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_RESHAPE_INT8_CODER_H_
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_RESHAPE_INT8_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_RESHAPE_INT8_CODER_H_
#include <vector>
#include "micro/coder/opcoders/op_coder.h"
namespace mindspore::lite::micro {
namespace mindspore::lite::micro::nnacl {
class ReshapeInt8Coder : public OperatorCoder {
public:
ReshapeInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
@ -33,5 +33,5 @@ class ReshapeInt8Coder : public OperatorCoder {
int DoCode(CoderContext *const context) override;
};
} // namespace mindspore::lite::micro
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_RESHAPE_INT8_CODER_H_
} // namespace mindspore::lite::micro::nnacl
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_RESHAPE_INT8_CODER_H_

View File

@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.