Merge pull request !3898 from ling/conv1x1
This commit is contained in:
mindspore-ci-bot 2020-08-03 21:51:10 +08:00 committed by Gitee
commit 42594daf80
7 changed files with 12 additions and 21 deletions

View File

@@ -26,12 +26,10 @@ class ArgMinMaxBaseCPUKernel : public LiteKernel {
ArgMinMaxBaseCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx)
: LiteKernel(parameter, inputs, outputs), context_(ctx), data_from_allocator_(false) {
opParameter->thread_num_ = ctx->threadNum;
opParameter->thread_num_ = ctx->thread_num_;
}
virtual ~ArgMinMaxBaseCPUKernel() {
FreeTmpMemory();
}
virtual ~ArgMinMaxBaseCPUKernel() { FreeTmpMemory(); }
int Init() override;
@@ -40,6 +38,7 @@ class ArgMinMaxBaseCPUKernel : public LiteKernel {
int Run() override;
void FreeTmpMemory();
private:
const lite::Context *context_;
bool data_from_allocator_;

View File

@@ -27,7 +27,7 @@ class BatchToSpaceBaseCPUKernel : public LiteKernel {
BatchToSpaceBaseCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx)
: LiteKernel(parameter, inputs, outputs) {
opParameter->thread_num_ = ctx->threadNum;
opParameter->thread_num_ = ctx->thread_num_;
}
virtual ~BatchToSpaceBaseCPUKernel() = default;
@@ -38,9 +38,8 @@ class BatchToSpaceBaseCPUKernel : public LiteKernel {
int Run() override { return 0; }
bool IsNoCrop() const {
return no_crop_;
}
bool IsNoCrop() const { return no_crop_; }
private:
bool no_crop_;
};

View File

@@ -27,7 +27,7 @@ class DepthToSpaceBaseCPUKernel : public LiteKernel {
DepthToSpaceBaseCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx)
: LiteKernel(parameter, inputs, outputs) {
opParameter->thread_num_ = ctx->threadNum;
opParameter->thread_num_ = ctx->thread_num_;
}
virtual ~DepthToSpaceBaseCPUKernel() = default;
@@ -39,5 +39,4 @@ class DepthToSpaceBaseCPUKernel : public LiteKernel {
int Run() override { return 0; }
};
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_DEPTH_TO_SPACE_BASE_H_

View File

@@ -29,7 +29,7 @@ class MatmulBaseCPUKernel : public LiteKernel {
public:
MatmulBaseCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx)
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->threadNum) {
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->thread_num_) {
params_ = reinterpret_cast<MatMulParameter *>(opParameter);
}
~MatmulBaseCPUKernel() = default;

View File

@@ -76,12 +76,6 @@ void MatMul8x8(const float *a, const float *b, float *c, const float *bias, ActT
void MatMul(const float *a, const float *b, float *c, const float *bias, ActType act_type, int deep, int row_8_,
int col_8_) {
#ifdef __aarch64__
float minf = (act_type == ActType_No) ? FLT_MIN : 0.f;
float maxf = (act_type == ActType_Relu6) ? 6.0f : FLT_MAX;
MatMulFloatNeon64(a, b, c, bias, maxf, minf, deep, row_8_, col_8_);
#else
MatMul8x8(a, b, c, bias, act_type, deep, row_8_, col_8_);
#endif
return;
}

View File

@@ -67,7 +67,7 @@ TEST_F(TestMatMulFp32, simple) {
std::vector<int> c_shape = {1, 2, 3};
int total_size = MMTestInit(&inputs_, &outputs_, a, b, a_shape, b_shape, c_shape);
auto ctx = new lite::Context;
ctx->threadNum = 2;
ctx->thread_num_ = 2;
auto mm = new kernel::MatmulCPUKernel(reinterpret_cast<OpParameter *>(matmul_param), inputs_, outputs_, ctx);
mm->Init();
mm->Run();
@@ -98,7 +98,7 @@ TEST_F(TestMatMulFp32, simple_transb) {
std::vector<int> c_shape = {1, 2, 3};
int total_size = MMTestInit(&inputs_, &outputs_, a, b, a_shape, b_shape, c_shape);
auto ctx = new lite::Context;
ctx->threadNum = 2;
ctx->thread_num_ = 2;
auto mm = new kernel::MatmulCPUKernel(reinterpret_cast<OpParameter *>(matmul_param), inputs_, outputs_, ctx);
mm->Init();
mm->Run();
@@ -148,7 +148,7 @@ TEST_F(TestMatMulFp32, batch) {
std::vector<int> c_shape = {3, 2, 3};
int total_size = MMTestInit(&inputs_, &outputs_, a, b, a_shape, b_shape, c_shape);
auto ctx = new lite::Context;
ctx->threadNum = 1;
ctx->thread_num_ = 1;
auto mm = new kernel::MatmulCPUKernel(reinterpret_cast<OpParameter *>(matmul_param), inputs_, outputs_, ctx);
mm->Init();
mm->Run();

View File

@@ -106,7 +106,7 @@ TEST_F(TestMatmulInt8, mmint8) {
int output_zp;
int total_size = MMInt8TestInit(&inputs_, &outputs_, matmul_param, &correct, &output_scale, &output_zp);
auto ctx = new lite::Context;
ctx->threadNum = 2;
ctx->thread_num_ = 2;
kernel::MatmulInt8CPUKernel *mm =
new kernel::MatmulInt8CPUKernel(reinterpret_cast<OpParameter *>(matmul_param), inputs_, outputs_, ctx);