forked from mindspore-Ecosystem/mindspore

fix bug and add testcase

parent 6b120584c2, commit 704c0b9bf9
@@ -221,6 +221,10 @@ lite::Primitive *ModelImpl::CopyPrimitive(const schema::Primitive *src_prim) {
       return new lite::Split(const_cast<schema::Primitive *>(src_prim));
     case schema::PrimitiveType_OneHot:
       return new lite::OneHot(const_cast<schema::Primitive *>(src_prim));
+    case schema::PrimitiveType_SpaceToDepth:
+      return new lite::SpaceToDepth(const_cast<schema::Primitive *>(src_prim));
+    case schema::PrimitiveType_Tile:
+      return new lite::Tile(const_cast<schema::Primitive *>(src_prim));
     case schema::PrimitiveType_Resize:
       return new lite::Resize(const_cast<schema::Primitive *>(src_prim));
     case schema::PrimitiveType_MatMul:
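This hunk extends the type dispatch in ModelImpl::CopyPrimitive: an op without a case here cannot be copied into a lite primitive at import time. A self-contained sketch of the pattern with hypothetical stand-in types (the real function switches on the schema primitive type and presumably returns nullptr for anything unregistered):

// Hypothetical mini-types standing in for schema::Primitive and the lite
// wrappers; only the dispatch pattern itself mirrors the hunk above.
enum PrimType { PrimType_Tile, PrimType_SpaceToDepth, PrimType_Other };
struct PrimSrc { PrimType type; };
struct PrimBase { explicit PrimBase(const PrimSrc *s) : src(s) {} const PrimSrc *src; };
struct TilePrim : PrimBase { using PrimBase::PrimBase; };
struct SpaceToDepthPrim : PrimBase { using PrimBase::PrimBase; };

PrimBase *CopyPrimitiveSketch(const PrimSrc *src) {
  switch (src->type) {
    case PrimType_Tile:          return new TilePrim(src);
    case PrimType_SpaceToDepth:  return new SpaceToDepthPrim(src);
    // ... one case per supported primitive ...
    default:                     return nullptr;  // unregistered op: copy fails
  }
}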
@@ -36,7 +36,7 @@
 #include "src/runtime/kernel/arm/nnacl/matmul_parameter.h"
 #include "src/runtime/kernel/arm/nnacl/fp32/roi_pooling.h"
 #include "src/runtime/kernel/arm/nnacl/softmax_parameter.h"
-#include "src/runtime/kernel/arm/nnacl/tile.h"
+#include "src/runtime/kernel/arm/nnacl/fp32/tile.h"
 #include "src/runtime/kernel/arm/nnacl/fp32/topk.h"
 #include "src/runtime/kernel/arm/nnacl/fp32/reduce.h"
 #include "src/runtime/kernel/arm/nnacl/fp32/activation.h"
@@ -55,7 +55,7 @@
 #include "src/runtime/kernel/arm/nnacl/fp32/gather.h"
 #include "src/runtime/kernel/arm/nnacl/fp32/reverse.h"
 #include "src/runtime/kernel/arm/nnacl/reverse_sequence.h"
-#include "src/runtime/kernel/arm/nnacl/unique.h"
+#include "src/runtime/kernel/arm/nnacl/fp32/unique.h"
 #include "src/runtime/kernel/arm/nnacl/scale.h"
 #include "src/runtime/kernel/arm/nnacl/fp32/gatherNd.h"
 #include "src/runtime/kernel/arm/nnacl/resize_parameter.h"
@@ -18,7 +18,7 @@

 #include <vector>
 #include "src/lite_kernel.h"
-#include "src/runtime/kernel/arm/nnacl/tile.h"
+#include "src/runtime/kernel/arm/nnacl/fp32/tile.h"

 namespace mindspore::kernel {
 class TileCPUKernel : public LiteKernel {
@@ -18,7 +18,7 @@

 #include <vector>
 #include "src/lite_kernel.h"
-#include "src/runtime/kernel/arm/nnacl/unique.h"
+#include "src/runtime/kernel/arm/nnacl/fp32/unique.h"

 namespace mindspore::kernel {
 class UniqueCPUKernel : public LiteKernel {
@@ -18,7 +18,7 @@

 #include <vector>
 #include "src/lite_kernel.h"
-#include "src/runtime/kernel/arm/nnacl/add_int8.h"
+#include "src/runtime/kernel/arm/nnacl/int8/add_int8.h"
 #include "src/runtime/runtime_api.h"

 namespace mindspore::kernel {
@@ -18,7 +18,7 @@

 #include <vector>
 #include "src/lite_kernel.h"
-#include "src/runtime/kernel/arm/nnacl/unique.h"
+#include "src/runtime/kernel/arm/nnacl/fp32/unique.h"
 #include "src/runtime/kernel/arm/nnacl/arithmetic_common.h"

 namespace mindspore::kernel {
@@ -14,7 +14,7 @@
  * limitations under the License.
  */

-#include "nnacl/tile.h"
+#include "nnacl/fp32/tile.h"
 #include <string.h>

 void DoCopyData(float *input_data, float *output_data, size_t size, size_t multiple) {
@@ -35,7 +35,7 @@ int DoTileOneDimension(float *input_data, float *output_data, size_t dim, TileParameter *parameter) {
     for (size_t j = 0; j < parameter->multiples_[dim]; ++j) {
       size_t in_pos = parameter->in_strides_[dim] * i;
       size_t out_pos = parameter->out_strides_[dim] * (i + j * src_dim_size);
-      TileOneDimension(input_data + in_pos, output_data + out_pos, dim + 1, parameter);
+      DoTileOneDimension(input_data + in_pos, output_data + out_pos, dim + 1, parameter);
     }
   }
   return 0;
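This hunk is the bug fix named in the commit message: the recursive step inside DoTileOneDimension called TileOneDimension, presumably a leftover from a rename, so the wrong routine ran at each recursion level. A self-contained sketch of the recursive tiling scheme, with hypothetical names and array sizes (the real TileParameter lives in nnacl, and the real kernel also has the DoCopyData helper shown above, which it likely uses for innermost runs instead of copying one element at a time):

// Minimal sketch, assuming the TileParameter fields exercised by the new
// Tile test: in_dim_, in_shape_, multiples_, in_strides_, out_strides_.
struct TileParamSketch {
  int in_dim_;
  int in_shape_[4];
  int multiples_[4];
  int in_strides_[4];
  int out_strides_[4];
};

void DoTileSketch(const float *in, float *out, int dim, const TileParamSketch *p) {
  if (dim == p->in_dim_) {  // past the last dimension: copy one element
    *out = *in;
    return;
  }
  int src_dim_size = p->in_shape_[dim];
  for (int i = 0; i < src_dim_size; ++i) {
    for (int j = 0; j < p->multiples_[dim]; ++j) {
      int in_pos = p->in_strides_[dim] * i;
      int out_pos = p->out_strides_[dim] * (i + j * src_dim_size);
      // The fixed line: the recursion must re-enter this function itself
      // with dim + 1, as the + side of the hunk now does.
      DoTileSketch(in + in_pos, out + out_pos, dim + 1, p);
    }
  }
}

Running the sketch with the values from the new Tile test (in_shape {2, 2}, multiples {2, 3}, in_strides {2, 1}, out_strides {6, 1}) reproduces that test's expect[] array.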
@@ -14,7 +14,7 @@
  * limitations under the License.
  */

-#include "nnacl/unique.h"
+#include "nnacl/fp32/unique.h"

 int Find(float *array, int len, float target) {
   for (int i = 0; i < len; ++i) {
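The Find helper above suggests the fp32 Unique kernel is a simple linear scan. A hedged, self-contained sketch of semantics that produce the new test's expected outputs (unique values {1, 2, 4, 7, 8}, indices {0, 0, 1, 2, 2, 2, 3, 4, 4}); the actual nnacl signature may differ:

// Linear-scan Unique: output collects first occurrences in input order,
// index[i] records where input[i] landed in the unique list.
int FindSketch(const float *array, int len, float target) {
  for (int i = 0; i < len; ++i) {
    if (array[i] == target) return i;
  }
  return -1;
}

void UniqueSketch(const float *input, int input_len, float *output,
                  int *output_len, int *index) {
  *output_len = 0;
  for (int i = 0; i < input_len; ++i) {
    int pos = FindSketch(output, *output_len, input[i]);
    if (pos < 0) {           // first occurrence: append to the unique list
      pos = (*output_len)++;
      output[pos] = input[i];
    }
    index[i] = pos;
  }
}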
@@ -14,7 +14,7 @@
  * limitations under the License.
  */

-#include "nnacl/add_int8.h"
+#include "nnacl/int8/add_int8.h"
 #ifdef ENABLE_NEON
 #include <arm_neon.h>
 #endif
@@ -18,7 +18,7 @@
 #include "nnacl/int8/arithmetic_self_int8.h"
 #ifdef ENABLE_NEON
 #include <arm_neon.h>
-#include "nnacl/add_int8.h"
+#include "nnacl/int8/add_int8.h"
 #endif
 #include "nnacl/quantization/fixed_point.h"

@@ -18,7 +18,7 @@
 #include "nnacl/mul_parameter.h"
 #ifdef ENABLE_NEON
 #include <arm_neon.h>
-#include "nnacl/add_int8.h"
+#include "nnacl/int8/add_int8.h"
 #endif
 #include "nnacl/quantization/fixed_point.h"

@@ -17,7 +17,7 @@
 #include "nnacl/int8/sub_int8.h"
 #ifdef ENABLE_NEON
 #include <arm_neon.h>
-#include "nnacl/add_int8.h"
+#include "nnacl/int8/add_int8.h"
 #endif
 #include "nnacl/quantization/fixed_point.h"

@@ -0,0 +1,157 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <iostream>
#include <memory>
#include "common/common_test.h"
#include "mindspore/lite/src/runtime/kernel/arm/nnacl/reverse_sequence.h"
#include "mindspore/lite/src/kernel_registry.h"

namespace mindspore {
class TestReverseSequenceFp32 : public mindspore::CommonTest {
 public:
  TestReverseSequenceFp32() {}
};

TEST_F(TestReverseSequenceFp32, BatchLessSeq) {
  lite::tensor::Tensor in_tensor0(kNumberTypeFloat32, {2, 3, 4, 2});
  lite::tensor::Tensor in_tensor1(kNumberTypeInt32, {3});
  lite::tensor::Tensor out_tensor(kNumberTypeFloat32, {2, 3, 4, 2});
  float input_data0[] = {0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15,
                         16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
                         32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47};
  int input_data1[] = {2, 3, 4};
  float output_data[2 * 3 * 4 * 2] = {0};
  in_tensor0.SetData(input_data0);
  in_tensor1.SetData(input_data1);
  out_tensor.SetData(output_data);
  std::vector<lite::tensor::Tensor *> inputs = {&in_tensor0, &in_tensor1};
  std::vector<lite::tensor::Tensor *> outputs = {&out_tensor};

  ReverseSequenceParameter parameter = {0};
  parameter.batch_axis_ = 1;
  parameter.seq_axis_ = 2;
  kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_ReverseSequence};

  auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
  EXPECT_NE(creator, nullptr);

  auto ctx = std::make_shared<lite::Context>();
  auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc, nullptr);
  EXPECT_NE(kernel, nullptr);

  auto ret = kernel->Run();
  EXPECT_EQ(0, ret);

  float expect[] = {2,  3,  0,  1,  4,  5,  6,  7,  12, 13, 10, 11, 8,  9,  14, 15, 22, 23, 20, 21, 18, 19, 16, 17,
                    26, 27, 24, 25, 28, 29, 30, 31, 36, 37, 34, 35, 32, 33, 38, 39, 46, 47, 44, 45, 42, 43, 40, 41};
  EXPECT_EQ(out_tensor.ElementsNum(), 2 * 3 * 4 * 2);

  for (int i = 0; i < 2 * 3 * 4 * 2; i++) {
    EXPECT_EQ(output_data[i], expect[i]);
  }

  in_tensor0.SetData(nullptr);
  in_tensor1.SetData(nullptr);
  out_tensor.SetData(nullptr);
}

TEST_F(TestReverseSequenceFp32, BatchGreaterSeq) {
  lite::tensor::Tensor in_tensor0(kNumberTypeFloat32, {2, 3, 4, 2});
  lite::tensor::Tensor in_tensor1(kNumberTypeInt32, {4});
  lite::tensor::Tensor out_tensor(kNumberTypeFloat32, {2, 3, 4, 2});
  float input_data0[] = {0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15,
                         16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
                         32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47};
  int input_data1[] = {2, 3, 3, 2};
  float output_data[2 * 3 * 4 * 2] = {0};
  in_tensor0.SetData(input_data0);
  in_tensor1.SetData(input_data1);
  out_tensor.SetData(output_data);
  std::vector<lite::tensor::Tensor *> inputs = {&in_tensor0, &in_tensor1};
  std::vector<lite::tensor::Tensor *> outputs = {&out_tensor};

  ReverseSequenceParameter parameter = {0};
  parameter.batch_axis_ = 2;
  parameter.seq_axis_ = 1;
  kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_ReverseSequence};

  auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
  EXPECT_NE(creator, nullptr);

  auto ctx = std::make_shared<lite::Context>();
  auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc, nullptr);
  EXPECT_NE(kernel, nullptr);

  auto ret = kernel->Run();
  EXPECT_EQ(0, ret);

  float expect[] = {8,  9,  18, 19, 20, 21, 14, 15, 0,  1,  10, 11, 12, 13, 6,  7,  16, 17, 2,  3,  4,  5,  22, 23,
                    32, 33, 42, 43, 44, 45, 38, 39, 24, 25, 34, 35, 36, 37, 30, 31, 40, 41, 26, 27, 28, 29, 46, 47};
  EXPECT_EQ(out_tensor.ElementsNum(), 2 * 3 * 4 * 2);

  for (int i = 0; i < 2 * 3 * 4 * 2; i++) {
    EXPECT_EQ(output_data[i], expect[i]);
  }

  in_tensor0.SetData(nullptr);
  in_tensor1.SetData(nullptr);
  out_tensor.SetData(nullptr);
}

TEST_F(TestReverseSequenceFp32, BatchSeqNotAdjacent) {
  lite::tensor::Tensor in_tensor0(kNumberTypeFloat32, {2, 3, 4, 2});
  lite::tensor::Tensor in_tensor1(kNumberTypeInt32, {2});
  lite::tensor::Tensor out_tensor(kNumberTypeFloat32, {2, 3, 4, 2});
  float input_data0[] = {0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15,
                         16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
                         32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47};
  int input_data1[] = {2, 4};
  float output_data[2 * 3 * 4 * 2] = {0};
  in_tensor0.SetData(input_data0);
  in_tensor1.SetData(input_data1);
  out_tensor.SetData(output_data);
  std::vector<lite::tensor::Tensor *> inputs = {&in_tensor0, &in_tensor1};
  std::vector<lite::tensor::Tensor *> outputs = {&out_tensor};

  ReverseSequenceParameter parameter = {0};
  parameter.batch_axis_ = 0;
  parameter.seq_axis_ = 2;
  kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_ReverseSequence};

  auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
  EXPECT_NE(creator, nullptr);

  auto ctx = std::make_shared<lite::Context>();
  auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc, nullptr);
  EXPECT_NE(kernel, nullptr);

  auto ret = kernel->Run();
  EXPECT_EQ(0, ret);

  float expect[] = {2,  3,  0,  1,  4,  5,  6,  7,  10, 11, 8,  9,  12, 13, 14, 15, 18, 19, 16, 17, 20, 21, 22, 23,
                    30, 31, 28, 29, 26, 27, 24, 25, 38, 39, 36, 37, 34, 35, 32, 33, 46, 47, 44, 45, 42, 43, 40, 41};
  EXPECT_EQ(out_tensor.ElementsNum(), 2 * 3 * 4 * 2);

  for (int i = 0; i < 2 * 3 * 4 * 2; i++) {
    EXPECT_EQ(output_data[i], expect[i]);
  }

  in_tensor0.SetData(nullptr);
  in_tensor1.SetData(nullptr);
  out_tensor.SetData(nullptr);
}
}  // namespace mindspore
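For readers decoding the expect[] arrays: ReverseSequence reverses the first seq_len[b] positions along seq_axis independently for each index b along batch_axis, and copies everything else through. A naive, self-contained reference under a row-major layout assumption (an illustration, not the nnacl kernel):

#include <cstddef>
#include <vector>

// Reference ReverseSequence for a dense row-major tensor of the given shape.
std::vector<float> ReverseSequenceRef(const std::vector<float> &in,
                                      const std::vector<int> &shape,
                                      const std::vector<int> &seq_len,
                                      int batch_axis, int seq_axis) {
  std::vector<int> stride(shape.size(), 1);  // row-major strides
  for (int d = static_cast<int>(shape.size()) - 2; d >= 0; --d) {
    stride[d] = stride[d + 1] * shape[d + 1];
  }
  std::vector<float> out(in.size());
  for (size_t pos = 0; pos < in.size(); ++pos) {
    size_t r = pos;
    std::vector<int> idx(shape.size());
    for (size_t d = 0; d < shape.size(); ++d) {  // decode the multi-index
      idx[d] = static_cast<int>(r / stride[d]);
      r %= stride[d];
    }
    int b = idx[batch_axis];
    int s = idx[seq_axis];
    long long src = static_cast<long long>(pos);
    if (s < seq_len[b]) {  // inside the reversed prefix: mirror the index
      src += static_cast<long long>(seq_len[b] - 1 - 2 * s) * stride[seq_axis];
    }
    out[pos] = in[static_cast<size_t>(src)];
  }
  return out;
}

ReverseSequenceRef(input, {2, 3, 4, 2}, {2, 3, 4}, 1, 2) reproduces the first test's expect[] array.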
@@ -0,0 +1,70 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <iostream>
#include <memory>
#include "common/common_test.h"
#include "mindspore/lite/src/runtime/kernel/arm/nnacl/fp32/tile.h"
#include "mindspore/lite/src/kernel_registry.h"

namespace mindspore {
class TestTileFp32 : public mindspore::CommonTest {
 public:
  TestTileFp32() {}
};

TEST_F(TestTileFp32, Tile) {
  lite::tensor::Tensor in_tensor(kNumberTypeFloat32, {2, 2});
  lite::tensor::Tensor out_tensor(kNumberTypeFloat32, {4, 6});
  float input_data[] = {1, 2, 3, 4};
  float output_data[24] = {0};
  in_tensor.SetData(input_data);
  out_tensor.SetData(output_data);
  std::vector<lite::tensor::Tensor *> inputs = {&in_tensor};
  std::vector<lite::tensor::Tensor *> outputs = {&out_tensor};

  TileParameter parameter = {0};
  parameter.in_dim_ = 2;
  parameter.in_shape_[0] = 2;
  parameter.in_shape_[1] = 2;
  parameter.multiples_[0] = 2;
  parameter.multiples_[1] = 3;
  parameter.in_strides_[0] = 2;
  parameter.in_strides_[1] = 1;
  parameter.out_strides_[0] = 6;
  parameter.out_strides_[1] = 1;

  kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_Tile};

  auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
  EXPECT_NE(creator, nullptr);

  auto ctx = std::make_shared<lite::Context>();
  auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc, nullptr);
  EXPECT_NE(kernel, nullptr);

  auto ret = kernel->Run();
  EXPECT_EQ(0, ret);

  float expect[] = {1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4, 1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4};
  for (int i = 0; i < 24; ++i) {
    EXPECT_EQ(output_data[i], expect[i]);
  }

  in_tensor.SetData(nullptr);
  out_tensor.SetData(nullptr);
}
}  // namespace mindspore
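The hard-coded stride fields above follow from a dense row-major layout; that layout is an assumption of this note rather than something the diff states. A minimal derivation:

#include <cstdio>

// Derive the stride fields the Tile test hard-codes, assuming dense
// row-major tensors (the kernel itself just consumes the values).
int main() {
  const int dims = 2;
  int in_shape[dims] = {2, 2}, multiples[dims] = {2, 3};
  int out_shape[dims], in_strides[dims], out_strides[dims];
  for (int d = 0; d < dims; ++d) out_shape[d] = in_shape[d] * multiples[d];  // {4, 6}
  in_strides[dims - 1] = out_strides[dims - 1] = 1;
  for (int d = dims - 2; d >= 0; --d) {
    in_strides[d] = in_strides[d + 1] * in_shape[d + 1];     // {2, 1}
    out_strides[d] = out_strides[d + 1] * out_shape[d + 1];  // {6, 1}
  }
  printf("in_strides {%d, %d}, out_strides {%d, %d}\n",
         in_strides[0], in_strides[1], out_strides[0], out_strides[1]);
  return 0;
}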
@@ -0,0 +1,70 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <iostream>
#include <memory>
#include "common/common_test.h"
#include "mindspore/lite/src/runtime/kernel/arm/nnacl/fp32/unique.h"
#include "mindspore/lite/src/kernel_registry.h"

namespace mindspore {
class TestUniqueFp32 : public mindspore::CommonTest {
 public:
  TestUniqueFp32() {}
};

TEST_F(TestUniqueFp32, Unique) {
  lite::tensor::Tensor in_tensor(kNumberTypeFloat32, {9});
  lite::tensor::Tensor out_tensor0(kNumberTypeFloat32, {9});
  lite::tensor::Tensor out_tensor1(kNumberTypeInt32, {9});
  float input_data[] = {1, 1, 2, 4, 4, 4, 7, 8, 8};
  float output_data0[9] = {0};
  int output_data1[9] = {0};
  in_tensor.SetData(input_data);
  out_tensor0.SetData(output_data0);
  out_tensor1.SetData(output_data1);
  std::vector<lite::tensor::Tensor *> inputs = {&in_tensor};
  std::vector<lite::tensor::Tensor *> outputs = {&out_tensor0, &out_tensor1};

  OpParameter parameter = {0};
  kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_Unique};

  auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
  EXPECT_NE(creator, nullptr);

  auto ctx = std::make_shared<lite::Context>();
  auto kernel = creator(inputs, outputs, &parameter, ctx.get(), desc, nullptr);
  EXPECT_NE(kernel, nullptr);

  auto ret = kernel->Run();
  EXPECT_EQ(0, ret);

  float expect0[] = {1, 2, 4, 7, 8};
  int expect1[] = {0, 0, 1, 2, 2, 2, 3, 4, 4};
  EXPECT_EQ(out_tensor0.ElementsNum(), 5);

  for (int i = 0; i < 5; i++) {
    EXPECT_EQ(output_data0[i], expect0[i]);
  }
  for (int i = 0; i < 9; ++i) {
    EXPECT_EQ(output_data1[i], expect1[i]);
  }

  in_tensor.SetData(nullptr);
  out_tensor0.SetData(nullptr);
  out_tensor1.SetData(nullptr);
}
}  // namespace mindspore
@@ -0,0 +1,122 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <iostream>
#include <memory>
#include "common/common_test.h"
#include "mindspore/lite/src/runtime/kernel/arm/nnacl/unstack.h"
#include "mindspore/lite/src/kernel_registry.h"

namespace mindspore {
class TestUnstackFp32 : public mindspore::CommonTest {
 public:
  TestUnstackFp32() {}
};

TEST_F(TestUnstackFp32, Unstack) {
  lite::tensor::Tensor in_tensor(kNumberTypeFloat32, {3, 4, 2});
  lite::tensor::Tensor out_tensor0(kNumberTypeFloat32, {3, 2});
  lite::tensor::Tensor out_tensor1(kNumberTypeFloat32, {3, 2});
  lite::tensor::Tensor out_tensor2(kNumberTypeFloat32, {3, 2});
  lite::tensor::Tensor out_tensor3(kNumberTypeFloat32, {3, 2});
  float input_data[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24};
  float output_data0[6] = {0};
  float output_data1[6] = {0};
  float output_data2[6] = {0};
  float output_data3[6] = {0};
  in_tensor.SetData(input_data);
  out_tensor0.SetData(output_data0);
  out_tensor1.SetData(output_data1);
  out_tensor2.SetData(output_data2);
  out_tensor3.SetData(output_data3);
  std::vector<lite::tensor::Tensor *> inputs = {&in_tensor};
  std::vector<lite::tensor::Tensor *> outputs = {&out_tensor0, &out_tensor1, &out_tensor2, &out_tensor3};

  UnstackParameter parameter = {{}, 4, -2, 3, 4, 2};
  kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_Unstack};

  auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
  EXPECT_NE(creator, nullptr);

  auto ctx = std::make_shared<lite::Context>();
  auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc, nullptr);
  EXPECT_NE(kernel, nullptr);

  auto ret = kernel->Run();
  EXPECT_EQ(0, ret);

  float expect0[] = {1, 2, 9, 10, 17, 18};
  float expect1[] = {3, 4, 11, 12, 19, 20};
  float expect2[] = {5, 6, 13, 14, 21, 22};
  float expect3[] = {7, 8, 15, 16, 23, 24};
  for (int i = 0; i < 6; ++i) {
    EXPECT_EQ(output_data0[i], expect0[i]);
    EXPECT_EQ(output_data1[i], expect1[i]);
    EXPECT_EQ(output_data2[i], expect2[i]);
    EXPECT_EQ(output_data3[i], expect3[i]);
  }

  in_tensor.SetData(nullptr);
  out_tensor0.SetData(nullptr);
  out_tensor1.SetData(nullptr);
  out_tensor2.SetData(nullptr);
  out_tensor3.SetData(nullptr);
}

TEST_F(TestUnstackFp32, Unstack2) {
  lite::tensor::Tensor in_tensor(kNumberTypeFloat32, {3, 4, 2});
  lite::tensor::Tensor out_tensor0(kNumberTypeFloat32, {4, 2});
  lite::tensor::Tensor out_tensor1(kNumberTypeFloat32, {4, 2});
  lite::tensor::Tensor out_tensor2(kNumberTypeFloat32, {4, 2});
  float input_data[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24};
  float output_data0[8] = {0};
  float output_data1[8] = {0};
  float output_data2[8] = {0};
  in_tensor.SetData(input_data);
  out_tensor0.SetData(output_data0);
  out_tensor1.SetData(output_data1);
  out_tensor2.SetData(output_data2);
  std::vector<lite::tensor::Tensor *> inputs = {&in_tensor};
  std::vector<lite::tensor::Tensor *> outputs = {&out_tensor0, &out_tensor1, &out_tensor2};

  UnstackParameter parameter = {{}, 3, 0, 1, 3, 8};
  kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_Unstack};

  auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
  EXPECT_NE(creator, nullptr);

  auto ctx = std::make_shared<lite::Context>();
  auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc, nullptr);
  EXPECT_NE(kernel, nullptr);

  auto ret = kernel->Run();
  EXPECT_EQ(0, ret);

  float expect0[] = {1, 2, 3, 4, 5, 6, 7, 8};
  float expect1[] = {9, 10, 11, 12, 13, 14, 15, 16};
  float expect2[] = {17, 18, 19, 20, 21, 22, 23, 24};
  for (int i = 0; i < 6; ++i) {
    EXPECT_EQ(output_data0[i], expect0[i]);
    EXPECT_EQ(output_data1[i], expect1[i]);
    EXPECT_EQ(output_data2[i], expect2[i]);
  }

  in_tensor.SetData(nullptr);
  out_tensor0.SetData(nullptr);
  out_tensor1.SetData(nullptr);
  out_tensor2.SetData(nullptr);
}
}  // namespace mindspore
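The brace initializer {{}, 4, -2, 3, 4, 2} is opaque without the UnstackParameter definition, which this diff does not show. Its trailing numbers line up with a pre-axis count, axis size, and post-axis count for splitting {3, 4, 2} along axis -2 (i.e. axis 1); that mapping is an inference, not a documented field order. A hedged, self-contained reference for the expected outputs:

#include <vector>

// Reference unstack of a dense row-major tensor: output a holds every
// element whose index along the split axis equals a.
std::vector<std::vector<float>> UnstackRef(const std::vector<float> &in,
                                           int pre, int axis_size, int post) {
  std::vector<std::vector<float>> outs(axis_size, std::vector<float>(pre * post));
  for (int p = 0; p < pre; ++p)
    for (int a = 0; a < axis_size; ++a)
      for (int q = 0; q < post; ++q)
        outs[a][p * post + q] = in[(p * axis_size + a) * post + q];
  return outs;
}

UnstackRef(input, 3, 4, 2) yields the four expect arrays of the first test; UnstackRef(input, 1, 3, 8) yields those of the second.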
@@ -0,0 +1,75 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <iostream>
#include <memory>
#include "common/common_test.h"
#include "mindspore/lite/src/runtime/kernel/arm/int8/bias_add_int8.h"
#include "mindspore/lite/src/kernel_registry.h"

using mindspore::lite::DeviceType;

namespace mindspore {
class TestBiasAddInt8 : public mindspore::CommonTest {
 public:
  TestBiasAddInt8() {}
};

TEST_F(TestBiasAddInt8, BiasAdd) {
  lite::tensor::Tensor in_tensor0(kNumberTypeInt8, {1, 2, 3, 2});
  lite::tensor::Tensor in_tensor1(kNumberTypeInt8, {2});
  lite::tensor::Tensor out_tensor(kNumberTypeInt8, {1, 2, 3, 2});
  int8_t input_data0[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
  int8_t input_data1[] = {1, 1};
  int8_t output_data[12] = {0};
  in_tensor0.SetData(input_data0);
  in_tensor1.SetData(input_data1);
  out_tensor.SetData(output_data);
  std::vector<lite::tensor::Tensor *> inputs = {&in_tensor0, &in_tensor1};
  std::vector<lite::tensor::Tensor *> outputs = {&out_tensor};

  ArithmeticParameter parameter = {};
  int dims[] = {1, 2, 3, 4};
  parameter.ndim_ = 4;
  for (int i = 0; i < 4; i++) {
    parameter.in_shape0_[i] = dims[i];
    parameter.in_shape1_[i] = 1;
    parameter.out_shape_[i] = dims[i];
  }
  parameter.in_shape1_[3] = dims[3];

  kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_BiasAdd};

  auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
  EXPECT_NE(creator, nullptr);

  auto ctx = std::make_shared<lite::Context>();
  auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc, nullptr);
  EXPECT_NE(kernel, nullptr);

  auto ret = kernel->Run();
  EXPECT_EQ(0, ret);

  float expect[] = {2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13};
  for (int i = 0; i < 12; ++i) {
    EXPECT_EQ(output_data[i], expect[i]);
  }

  in_tensor0.SetData(nullptr);
  in_tensor1.SetData(nullptr);
  out_tensor.SetData(nullptr);
}
}  // namespace mindspore
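What the BiasAdd test checks is a plain broadcast add of the 2-element bias over the trailing axis; no requantization is visible in the expected values, which are simply the inputs plus one. A hedged, self-contained reference:

#include <cstdint>

// Broadcast bias-add over the last axis: out[i][c] = in[i][c] + bias[c].
// A plain integer add, matching the expect[] values in the test above;
// the real int8 kernel may apply quantization scaling that this test's
// default parameters leave as identity.
void BiasAddRef(const int8_t *in, const int8_t *bias, int8_t *out,
                int outer, int channel) {
  for (int i = 0; i < outer; ++i)
    for (int c = 0; c < channel; ++c)
      out[i * channel + c] = static_cast<int8_t>(in[i * channel + c] + bias[c]);
}
// For the test: outer = 1 * 2 * 3, channel = 2, bias = {1, 1}, so every
// element is incremented by 1.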