!8512 move compareOutput functions to /test
From: @lyvette Reviewed-by: Signed-off-by:
Commit 2b4497a8c1
@@ -84,37 +84,5 @@ std::string RealPath(const char *path) {
  std::string res = resolvedPath.get();
  return res;
}

int CompareOutputData(const float *output_data, size_t output_size, const float *correct_data, size_t data_size) {
  if (output_size != data_size) {
    printf("compare failed, output_size %zu isn't equal to data_size %zu.\n", output_size, data_size);
    return 0;
  }
  float error = 0;
  for (size_t i = 0; i < data_size; i++) {
    float abs = fabs(output_data[i] - correct_data[i]);
    if (abs > 0.00001) {
      error += abs;
    }
  }
  error /= data_size;

  if (error > 0.0001) {
    printf("has accuracy error!\n");
    printf("%f\n", error);
    return 1;
  }
  return 0;
}

int CompareOutput(const float *output_data, size_t output_num, const std::string &file_path) {
  size_t ground_truth_size = 0;
  auto ground_truth = reinterpret_cast<float *>(mindspore::lite::ReadFile(file_path.c_str(), &ground_truth_size));
  size_t ground_truth_num = ground_truth_size / sizeof(float);
  printf("ground truth num : %zu\n", ground_truth_num);
  int res = CompareOutputData(output_data, output_num, ground_truth, ground_truth_num);
  delete[] ground_truth;
  return res;
}
}  // namespace lite
}  // namespace mindspore
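For context, the CompareOutput helper removed above reads a ground-truth blob from disk with ReadFile and checks a mean absolute error against it. Before this commit a caller could use it roughly like the following sketch (the wrapper function, output buffer, and the "./data/expect_out.bin" path are hypothetical, invented only for illustration):

#include <string>
#include <vector>
#include "src/common/file_utils.h"

int CheckAgainstGroundTruth(const std::vector<float> &output) {
  // Hypothetical ground-truth file produced ahead of time; CompareOutput
  // returns 1 when the accumulated error exceeds its threshold, 0 otherwise.
  const std::string ground_truth_path = "./data/expect_out.bin";
  return mindspore::lite::CompareOutput(output.data(), output.size(), ground_truth_path);
}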
@@ -58,9 +58,6 @@ inline int WriteToBin(const std::string &file_path, void *data, size_t size) {
  return 0;
}

int CompareOutputData(const float *output_data, size_t output_num, const float *correct_data, size_t data_size);
int CompareOutput(const float *output_data, size_t output_num, const std::string &file_path);

std::string GetAndroidPackageName();
std::string GetAndroidPackagePath();
}  // namespace lite

@@ -1,65 +0,0 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <cmath>
#include <cstddef>
#include <iostream>
#include "src/common/file_utils.h"
#include "src/common/file_utils_ext.h"

namespace mindspore {
namespace lite {
static float CompareOutputRelativeData(const float *output_data, const float *correct_data, int data_size) {
  float error = 0;

  // relative error
  float diffSum = 0.0f;
  float sum = 0.0f;
  for (int i = 0; i < data_size; i++) {
    sum += std::abs(correct_data[i]);
  }
  for (int i = 0; i < data_size; i++) {
    float diff = std::abs(output_data[i] - correct_data[i]);
    diffSum += diff;
  }
  error = diffSum / sum;
  return error;
}

int CompareRelativeOutput(const float *output_data, std::string file_path) {
  size_t output_size;
  auto ground_truth = reinterpret_cast<float *>(mindspore::lite::ReadFile(file_path.c_str(), &output_size));
  if (ground_truth == nullptr) {
    return 1;
  }
  size_t output_num = output_size / sizeof(float);
  float error = CompareOutputRelativeData(output_data, ground_truth, output_num);
  delete[] ground_truth;
  if (error > 1e-4) {
    return 1;
  }
  return 0;
}

float RelativeOutputError(const float *output_data, std::string file_path) {
  size_t output_size = 0;
  auto ground_truth = reinterpret_cast<float *>(mindspore::lite::ReadFile(file_path.c_str(), &output_size));
  size_t output_num = output_size / sizeof(float);
  float error = CompareOutputRelativeData(output_data, ground_truth, output_num);
  delete[] ground_truth;
  return error;
}
}  // namespace lite
}  // namespace mindspore
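For reference, the metric implemented by the deleted CompareOutputRelativeData is a summed relative error over the whole buffer, in the notation of the loops above:

  error = (sum_i |output_data[i] - correct_data[i]|) / (sum_i |correct_data[i]|)

CompareRelativeOutput reads the ground truth from file_path and treats error > 1e-4 as a failure (returns 1), while RelativeOutputError returns the raw error value; the same logic reappears further down as static members of CommonTest.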
@@ -1,27 +0,0 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_LITE_COMMON_FILE_UTILS_EXT_H_
#define MINDSPORE_LITE_COMMON_FILE_UTILS_EXT_H_
#include <string>

namespace mindspore {
namespace lite {
int CompareRelativeOutput(const float *output_data, std::string file_path);
float RelativeOutputError(const float *output_data, std::string file_path);
}  // namespace lite
}  // namespace mindspore
#endif  // MINDSPORE_LITE_COMMON_FILE_UTILS_EXT_H_

@@ -133,7 +133,6 @@ set(TEST_LITE_SRC
    ${LITE_DIR}/src/scheduler.cc
    ${LITE_DIR}/src/common/graph_util.cc
    ${LITE_DIR}/src/common/file_utils.cc
    ${LITE_DIR}/src/common/file_utils_ext.cc
    ${LITE_DIR}/src/common/utils.cc
    ${LITE_DIR}/src/common/string_util.cc
    ${LITE_DIR}/tools/common/graph_util.cc
@@ -22,6 +22,8 @@
#include <string>
#include <algorithm>
#include "gtest/gtest.h"
#include "src/common/file_utils.h"

namespace mindspore {
class CommonTest : public testing::Test {
 public:
@@ -34,7 +36,7 @@ class CommonTest : public testing::Test {
  virtual void TearDown();

  template <typename T>
  void PrintData(std::string name, T *output_data, int size) {
  void PrintData(const std::string &name, T *output_data, int size) {
    std::cout << "The " << name << " is as follows:" << std::endl;
    if (typeid(output_data[0]) == typeid(uint8_t) || typeid(output_data[0]) == typeid(int8_t)) {
      for (int i = 0; i < std::min(size, 100); i++) {
@@ -49,14 +51,22 @@ class CommonTest : public testing::Test {
  }

  template <typename T>
  static void CompareOutputData(T *output_data, T *correct_data, int size, float err_bound) {
  static int CompareOutputData(const T *output_data, const T *correct_data, int size, float err_bound = 1e-4) {
    float error = 0;
    for (int i = 0; i < size; i++) {
      T abs = fabs(output_data[i] - correct_data[i]);
      ASSERT_LE(abs, err_bound);
      T diff = std::fabs(output_data[i] - correct_data[i]);
      if (diff > 0.00001) {
        error += diff;
      }
    }
    error /= static_cast<float>(size);
    if (error > err_bound) {
      return 1;
    }
    return 0;
  }

  void CompareOutputInt8(int8_t *output_data, int8_t *correct_data, int size, float err_percent) {
  static void CompareOutputInt8(int8_t *output_data, int8_t *correct_data, int size, float err_percent) {
    int bias_count = 0;
    for (int i = 0; i < size; i++) {
      int8_t diff = abs(output_data[i] - correct_data[i]);
@@ -65,11 +75,62 @@ class CommonTest : public testing::Test {
        bias_count++;
      }
    }
    float bias_percent = static_cast<float>(bias_count) / size;
    float bias_percent = static_cast<float>(bias_count) / static_cast<float>(size);
    ASSERT_LE(bias_percent, err_percent);
  }

  void ReadFile(const char *file, size_t *size, char **buf) {
  static int CompareOutput(const float *output_data, size_t output_num, const std::string &file_path) {
    size_t ground_truth_size = 0;
    auto ground_truth = reinterpret_cast<float *>(lite::ReadFile(file_path.c_str(), &ground_truth_size));
    size_t ground_truth_num = ground_truth_size / sizeof(float);
    printf("ground truth num : %zu\n", ground_truth_num);
    int res = CompareOutputData(output_data, ground_truth, ground_truth_num);
    delete[] ground_truth;
    return res;
  }

  static float CompareOutputRelativeData(const float *output_data, const float *correct_data, int data_size) {
    float error = 0;

    // relative error
    float diffSum = 0.0f;
    float sum = 0.0f;
    for (int i = 0; i < data_size; i++) {
      sum += std::abs(correct_data[i]);
    }
    for (int i = 0; i < data_size; i++) {
      float diff = std::abs(output_data[i] - correct_data[i]);
      diffSum += diff;
    }
    error = diffSum / sum;
    return error;
  }

  static int CompareRelativeOutput(const float *output_data, const std::string &file_path) {
    size_t output_size;
    auto ground_truth = reinterpret_cast<float *>(mindspore::lite::ReadFile(file_path.c_str(), &output_size));
    if (ground_truth == nullptr) {
      return 1;
    }
    size_t output_num = output_size / sizeof(float);
    float error = CompareOutputRelativeData(output_data, ground_truth, output_num);
    delete[] ground_truth;
    if (error > 1e-4) {
      return 1;
    }
    return 0;
  }

  static float RelativeOutputError(const float *output_data, const std::string &file_path) {
    size_t output_size = 0;
    auto ground_truth = reinterpret_cast<float *>(mindspore::lite::ReadFile(file_path.c_str(), &output_size));
    size_t output_num = output_size / sizeof(float);
    float error = CompareOutputRelativeData(output_data, ground_truth, output_num);
    delete[] ground_truth;
    return error;
  }

  static void ReadFile(const char *file, size_t *size, char **buf) {
    ASSERT_NE(nullptr, file);
    ASSERT_NE(nullptr, size);
    ASSERT_NE(nullptr, buf);
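With the comparison helpers now living on CommonTest as static members that return an error code instead of asserting internally, tests assert on the return value themselves, which is what the remaining hunks in this commit do. A minimal usage sketch (the fixture name, data values, and the "./data/expect_out.bin" path are made up for illustration; the include path may differ per test target):

#include <vector>
#include "common/common_test.h"

namespace mindspore {
class DemoCompareTest : public CommonTest {};  // hypothetical fixture

TEST_F(DemoCompareTest, CompareAgainstExpected) {
  std::vector<float> output = {1.0f, 2.0f, 3.0f};
  std::vector<float> expect = {1.0f, 2.0f, 3.0f};
  // Element-wise comparison: returns 0 when the averaged absolute difference
  // stays within the error bound, 1 otherwise.
  ASSERT_EQ(0, CompareOutputData(output.data(), expect.data(), 3, 0.00001));
  // Comparison against a ground-truth binary file (hypothetical path).
  ASSERT_EQ(0, CompareOutput(output.data(), output.size(), "./data/expect_out.bin"));
}
}  // namespace mindspore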
@@ -72,7 +72,7 @@ TEST_F(InferTest, TestSession) {
    std::cout << *(reinterpret_cast<float *>(outvec.at(0)->data_) + i) << " ";
  }
  std::cout << "\n";
  CompareOutputData(reinterpret_cast<float *>(outvec.at(0)->data_), expect_out, kOutSize, 0.000001);
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(outvec.at(0)->data_), expect_out, kOutSize, 0.000001));
  DestroyTensor(in);
  DestroyTensor(out);
}

@@ -90,8 +90,8 @@ TEST_F(TestInternalArithmeticFp32, MulTest) {
  out_tensors[0]->data_ = new float[correct_out.size()];
  DoArithmetic(in_tensors, out_tensors, node, &allocator);

  CompareOutputData(reinterpret_cast<float *>(out_tensors.front()->data_), correct_out.data(), correct_out.size(),
                    0.00001);
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(out_tensors.front()->data_), correct_out.data(),
                                 correct_out.size(), 0.00001));

  delete[] out_tensors[0]->data_;
  delete node;

@@ -82,8 +82,8 @@ TEST_F(TestInternalBiasAddFp32, BiasAddTest) {
  out_tensors[0]->data_ = new float[correct_out.size()];
  DoBiasAdd(in_tensors, out_tensors, node, &allocator);

  CompareOutputData(reinterpret_cast<float *>(out_tensors.front()->data_), correct_out.data(), correct_out.size(),
                    0.00001);
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(out_tensors.front()->data_), correct_out.data(),
                                 correct_out.size(), 0.00001));

  delete out_tensors[0]->data_;
  delete node;

@@ -77,7 +77,7 @@ TEST_F(TestInternalReduceFp32, ReduceSumOneAxisTest) {

  DoReduce(in_tensors, out_tensors, node, &allocator);

  CompareOutputData(reinterpret_cast<float *>(out_tensors.front()->data_), correct, 24, 0.00001);
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(out_tensors.front()->data_), correct, 24, 0.00001));
  delete out_tensors[0]->data_;
  delete node;
  delete params;
@@ -126,7 +126,7 @@ TEST_F(TestInternalReduceFp32, ReduceSumAllAxisTest) {

  DoReduce(in_tensors, out_tensors, node, &allocator);

  CompareOutputData(reinterpret_cast<float *>(out_tensors.front()->data_), correct, 1, 0.00001);
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(out_tensors.front()->data_), correct, 1, 0.00001));
  delete out_tensors[0]->data_;
  delete node;
  delete params;
@@ -180,7 +180,7 @@ TEST_F(TestInternalReduceFp32, ReduceMeanOneAxisTest) {

  DoReduce(in_tensors, out_tensors, node, &allocator);

  CompareOutputData(reinterpret_cast<float *>(out_tensors.front()->data_), correct, 24, 0.00001);
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(out_tensors.front()->data_), correct, 24, 0.00001));
  delete out_tensors[0]->data_;
  delete node;
  delete params;
@@ -234,7 +234,7 @@ TEST_F(TestInternalReduceFp32, ReduceMeanAllAxisTest) {

  DoReduce(in_tensors, out_tensors, node, &allocator);

  CompareOutputData(reinterpret_cast<float *>(out_tensors.front()->data_), correct, 1, 0.00001);
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(out_tensors.front()->data_), correct, 1, 0.00001));
  delete out_tensors[0]->data_;
  delete node;
  delete params;

@@ -71,7 +71,7 @@ TEST_F(TestStridedSlice, StridedSlice) {
  EXPECT_EQ(0, ret);

  float expect[2] = {0.2390374, 0.05051243};
  CompareOutputData(output_data, expect, 2, 0.000001);
  ASSERT_EQ(0, CompareOutputData(output_data, expect, 2, 0.000001));

  in_tensor.set_data(nullptr);
  out_tensor.set_data(nullptr);

@@ -95,7 +95,7 @@ TEST_F(TestReduceFp16, Mean) {
  int num_axis = 1;
  int thread_num = 1;
  Prepare(input_shape, output_shape, in, out, num_axis, axes, thread_num);
  CompareOutputData(out, correct, 24, 1e-3);
  ASSERT_EQ(0, CompareOutputData(out, correct, 24, 1e-3));
}

}  // namespace mindspore
@@ -132,7 +132,7 @@ TEST_F(TestActivationFp32, HSwishFp32) {
  kernel->Run();

  std::vector<float> expect_output = {-0, -0.33333334, -0.33333334, 0, 0.6666667, 5, 6, 7};
  CompareOutputData(output.data(), expect_output.data(), 8, 0.00001);
  ASSERT_EQ(0, CompareOutputData(output.data(), expect_output.data(), 8, 0.00001));

  input0_tensor.set_data(nullptr);
  output0_tensor.set_data(nullptr);
@@ -176,7 +176,7 @@ TEST_F(TestActivationFp32, HardTanh1) {
  kernel->Run();

  std::vector<float> expect_output = {-1.0, -1.0, -0.5, 0.0, 0.5, 1.0, 1.0, 1.0};
  CompareOutputData(output.data(), expect_output.data(), 8, 0.00001);
  ASSERT_EQ(0, CompareOutputData(output.data(), expect_output.data(), 8, 0.00001));

  input0_tensor.set_data(nullptr);
  output0_tensor.set_data(nullptr);
@@ -220,7 +220,7 @@ TEST_F(TestActivationFp32, HardTanh2) {
  kernel->Run();

  std::vector<float> expect_output = {-2.0, -2.0, -1.0, 0.0, 1.0, 2.0, 2.0, 2.0};
  CompareOutputData(output.data(), expect_output.data(), 8, 0.00001);
  ASSERT_EQ(0, CompareOutputData(output.data(), expect_output.data(), 8, 0.00001));

  input0_tensor.set_data(nullptr);
  output0_tensor.set_data(nullptr);
@@ -44,7 +44,7 @@ TEST_F(TestArgMinMaxTestFp32, ArgMaxTest1) {
    std::cout << out[i] << " ";
  }
  std::cout << "\n";
  CompareOutputData(out, except_out.data(), except_out.size(), 0.000001);
  ASSERT_EQ(0, CompareOutputData(out, except_out.data(), except_out.size(), 0.000001));
}

TEST_F(TestArgMinMaxTestFp32, ArgMaxTest1_keep_dim) {
@@ -69,7 +69,7 @@ TEST_F(TestArgMinMaxTestFp32, ArgMaxTest1_keep_dim) {
    std::cout << out[i] << " ";
  }
  std::cout << "\n";
  CompareOutputData(out, except_out.data(), except_out.size(), 0.000001);
  ASSERT_EQ(0, CompareOutputData(out, except_out.data(), except_out.size(), 0.000001));
}

TEST_F(TestArgMinMaxTestFp32, ArgMaxTest_axis2_keep_dim) {
@@ -95,7 +95,7 @@ TEST_F(TestArgMinMaxTestFp32, ArgMaxTest_axis2_keep_dim) {
    std::cout << out[i] << " ";
  }
  std::cout << "\n";
  CompareOutputData(out, except_out.data(), except_out.size(), 0.000001);
  ASSERT_EQ(0, CompareOutputData(out, except_out.data(), except_out.size(), 0.000001));
}

TEST_F(TestArgMinMaxTestFp32, ArgMaxTest2) {
@@ -112,7 +112,7 @@ TEST_F(TestArgMinMaxTestFp32, ArgMaxTest2) {
  param.get_max_ = true;
  param.keep_dims_ = false;
  ArgMinMax(in.data(), out, shape.data(), &param);
  CompareOutputData(out, except_out.data(), except_out.size(), 0.000001);
  ASSERT_EQ(0, CompareOutputData(out, except_out.data(), except_out.size(), 0.000001));
}

TEST_F(TestArgMinMaxTestFp32, ArgMinTest2) {
@@ -129,7 +129,7 @@ TEST_F(TestArgMinMaxTestFp32, ArgMinTest2) {
  param.get_max_ = false;
  param.keep_dims_ = false;
  ArgMinMax(in.data(), out, shape.data(), &param);
  CompareOutputData(out, except_out.data(), except_out.size(), 0.000001);
  ASSERT_EQ(0, CompareOutputData(out, except_out.data(), except_out.size(), 0.000001));
}

TEST_F(TestArgMinMaxTestFp32, ArgMaxTest3_axis2_out_data) {
@@ -146,7 +146,7 @@ TEST_F(TestArgMinMaxTestFp32, ArgMaxTest3_axis2_out_data) {
  ComputeStrides(out_shape.data(), param.out_strides_, out_shape.size());
  float out[10];
  ArgMaxDim2(in.data(), out, in_shape.data(), &param);
  CompareOutputData(out, except_out.data(), except_out.size(), 0.00001);
  ASSERT_EQ(0, CompareOutputData(out, except_out.data(), except_out.size(), 0.00001));
}

TEST_F(TestArgMinMaxTestFp32, ArgMaxTest3_axis2_out_index) {
@@ -163,7 +163,7 @@ TEST_F(TestArgMinMaxTestFp32, ArgMaxTest3_axis2_out_index) {
  ComputeStrides(out_shape.data(), param.out_strides_, out_shape.size());
  float out[10];
  ArgMaxDim2(in.data(), out, in_shape.data(), &param);
  CompareOutputData(out, except_out.data(), except_out.size(), 0.00001);
  ASSERT_EQ(0, CompareOutputData(out, except_out.data(), except_out.size(), 0.00001));
}

TEST_F(TestArgMinMaxTestFp32, ArgMaxTest4_axis3_out_data) {
@@ -180,7 +180,7 @@ TEST_F(TestArgMinMaxTestFp32, ArgMaxTest4_axis3_out_data) {
  ComputeStrides(out_shape.data(), param.out_strides_, out_shape.size());
  float out[6];
  ArgMaxDim3(in.data(), out, in_shape.data(), &param);
  CompareOutputData(out, except_out.data(), except_out.size(), 0.00001);
  ASSERT_EQ(0, CompareOutputData(out, except_out.data(), except_out.size(), 0.00001));
}

TEST_F(TestArgMinMaxTestFp32, ArgMaxTest4_axis3_out_index) {
@@ -197,7 +197,7 @@ TEST_F(TestArgMinMaxTestFp32, ArgMaxTest4_axis3_out_index) {
  ComputeStrides(out_shape.data(), param.out_strides_, out_shape.size());
  float out[6];
  ArgMaxDim3(in.data(), out, in_shape.data(), &param);
  CompareOutputData(out, except_out.data(), except_out.size(), 0.00001);
  ASSERT_EQ(0, CompareOutputData(out, except_out.data(), except_out.size(), 0.00001));
}

TEST_F(TestArgMinMaxTestFp32, ArgMaxTest5_axis1_out_index) {
@@ -215,7 +215,7 @@ TEST_F(TestArgMinMaxTestFp32, ArgMaxTest5_axis1_out_index) {
  ComputeStrides(out_shape.data(), param.out_strides_, out_shape.size());
  float out[24];
  ArgMaxDim1(in.data(), out, in_shape.data(), &param);
  CompareOutputData(out, except_out.data(), except_out.size(), 0.00001);
  ASSERT_EQ(0, CompareOutputData(out, except_out.data(), except_out.size(), 0.00001));
}

TEST_F(TestArgMinMaxTestFp32, ArgMaxTest5_axis1_out_data) {
@@ -234,7 +234,7 @@ TEST_F(TestArgMinMaxTestFp32, ArgMaxTest5_axis1_out_data) {
  ComputeStrides(out_shape.data(), param.out_strides_, out_shape.size());
  float out[24];
  ArgMaxDim1(in.data(), out, in_shape.data(), &param);
  CompareOutputData(out, except_out.data(), except_out.size(), 0.00001);
  ASSERT_EQ(0, CompareOutputData(out, except_out.data(), except_out.size(), 0.00001));
}

TEST_F(TestArgMinMaxTestFp32, ArgMaxTest6_axis0_out_index) {
@@ -251,7 +251,7 @@ TEST_F(TestArgMinMaxTestFp32, ArgMaxTest6_axis0_out_index) {
  ComputeStrides(out_shape.data(), param.out_strides_, out_shape.size());
  float out[16];
  ArgMaxDim0(in.data(), out, in_shape.data(), &param);
  CompareOutputData(out, except_out.data(), except_out.size(), 0.00001);
  ASSERT_EQ(0, CompareOutputData(out, except_out.data(), except_out.size(), 0.00001));
}

TEST_F(TestArgMinMaxTestFp32, ArgMaxTest6_axis0_out_data) {
@@ -268,7 +268,7 @@ TEST_F(TestArgMinMaxTestFp32, ArgMaxTest6_axis0_out_data) {
  ComputeStrides(out_shape.data(), param.out_strides_, out_shape.size());
  float out[16];
  ArgMaxDim0(in.data(), out, in_shape.data(), &param);
  CompareOutputData(out, except_out.data(), except_out.size(), 0.00001);
  ASSERT_EQ(0, CompareOutputData(out, except_out.data(), except_out.size(), 0.00001));
}

TEST_F(TestArgMinMaxTestFp32, ArgMinTest1_axis3_out_data) {
@@ -285,7 +285,7 @@ TEST_F(TestArgMinMaxTestFp32, ArgMinTest1_axis3_out_data) {
  ComputeStrides(out_shape.data(), param.out_strides_, out_shape.size());
  float out[6];
  ArgMinDim3(in.data(), out, in_shape.data(), &param);
  CompareOutputData(out, except_out.data(), except_out.size(), 0.00001);
  ASSERT_EQ(0, CompareOutputData(out, except_out.data(), except_out.size(), 0.00001));
}

}  // namespace mindspore
@@ -123,7 +123,7 @@ TEST_F(TestArithmeticTestFp32, AddTest) {
  auto tile_data0 = new float[size];
  auto tile_data1 = new float[size];
  BroadcastAdd(in_ptr, add_ptr, tile_data0, tile_data1, out, size, add_param);
  CompareOutputData(out, correct_out_ptr, size, 0.00001);
  ASSERT_EQ(0, CompareOutputData(out, correct_out_ptr, size, 0.00001));

  delete[] out;
  delete[] tile_data0;
@@ -166,7 +166,7 @@ TEST_F(TestArithmeticTestFp32, MulTest) {
  auto tile_data0 = new float[size];
  auto tile_data1 = new float[size];
  BroadcastMul(in_ptr, add_ptr, tile_data0, tile_data1, out, size, mul_param);
  CompareOutputData(out, correct_out_ptr, size, 0.00001);
  ASSERT_EQ(0, CompareOutputData(out, correct_out_ptr, size, 0.00001));

  delete[] out;
  delete[] tile_data0;
@@ -209,7 +209,7 @@ TEST_F(TestArithmeticTestFp32, DivTest) {
  auto tile_data0 = new float[size];
  auto tile_data1 = new float[size];
  BroadcastDiv(in_ptr, add_ptr, tile_data0, tile_data1, out, size, div_param);
  CompareOutputData(out, correct_out_ptr, size, 0.00001);
  ASSERT_EQ(0, CompareOutputData(out, correct_out_ptr, size, 0.00001));

  delete[] out;
  delete[] tile_data0;
@@ -229,7 +229,7 @@ TEST_F(TestArithmeticTestFp32, DivTest2) {
    std::cout << out[i] << " ";
  }
  std::cout << "\n";
  CompareOutputData(out, correct_out.data(), kOutSize, 0.00001);
  ASSERT_EQ(0, CompareOutputData(out, correct_out.data(), kOutSize, 0.00001));
}

TEST_F(TestArithmeticTestFp32, FloorDivTest) {
@@ -263,7 +263,7 @@ TEST_F(TestArithmeticTestFp32, FloorDivTest) {
  auto tile_data1 = new float[size];
  int ret = BroadcastFloorDiv(in_ptr, add_ptr, tile_data0, tile_data1, out, size, fdiv_param);
  EXPECT_EQ(ret, 0);
  CompareOutputData(out, correct_out_ptr, size, 0.00001);
  ASSERT_EQ(0, CompareOutputData(out, correct_out_ptr, size, 0.00001));

  delete[] out;
  delete[] tile_data0;
@@ -303,7 +303,7 @@ TEST_F(TestArithmeticTestFp32, FloorModTest) {
  auto tile_data1 = new float[size];
  int ret = BroadcastFloorMod(in_ptr, add_ptr, tile_data0, tile_data1, out, size, fmod_param);
  EXPECT_EQ(ret, 0);
  CompareOutputData(out, correct_out_ptr, size, 0.00001);
  ASSERT_EQ(0, CompareOutputData(out, correct_out_ptr, size, 0.00001));

  delete[] out;
  delete[] tile_data0;
@@ -342,7 +342,7 @@ TEST_F(TestArithmeticTestFp32, LogicalAndTest) {
  auto tile_data0 = new float[size];
  auto tile_data1 = new float[size];
  BroadcastLogicalAnd(in_ptr, add_ptr, tile_data0, tile_data1, out, size, logical_and_param);
  CompareOutputData(out, correct_out_ptr, size, 0.00001);
  ASSERT_EQ(0, CompareOutputData(out, correct_out_ptr, size, 0.00001));

  delete[] out;
  delete[] tile_data0;
@@ -383,7 +383,7 @@ TEST_F(TestArithmeticTestFp32, LogicalOrTest) {
  auto tile_data0 = new float[size];
  auto tile_data1 = new float[size];
  BroadcastLogicalOr(in_ptr, add_ptr, tile_data0, tile_data1, out, size, logical_or_param);
  CompareOutputData(out, correct_out_ptr, size, 0.00001);
  ASSERT_EQ(0, CompareOutputData(out, correct_out_ptr, size, 0.00001));

  delete[] out;
  delete[] tile_data0;
@@ -427,7 +427,7 @@ TEST_F(TestArithmeticTestFp32, MaximumTest) {
  auto tile_data0 = new float[size];
  auto tile_data1 = new float[size];
  BroadcastMaximum(in_ptr, add_ptr, tile_data0, tile_data1, out, size, maximum_param);
  CompareOutputData(out, correct_out_ptr, size, 0.00001);
  ASSERT_EQ(0, CompareOutputData(out, correct_out_ptr, size, 0.00001));

  delete[] out;
  delete[] tile_data0;
@@ -471,7 +471,7 @@ TEST_F(TestArithmeticTestFp32, MinimumTest) {
  auto tile_data0 = new float[size];
  auto tile_data1 = new float[size];
  BroadcastMinimum(in_ptr, add_ptr, tile_data0, tile_data1, out, size, minimum_param);
  CompareOutputData(out, correct_out_ptr, size, 0.00001);
  ASSERT_EQ(0, CompareOutputData(out, correct_out_ptr, size, 0.00001));

  delete[] out;
  delete[] tile_data0;
@@ -507,7 +507,7 @@ TEST_F(TestArithmeticTestFp32, SquaredDifferenceTest) {
  auto tile_data1 = new float[size];
  BroadcastSub(in_ptr, add_ptr, tile_data0, tile_data1, out, size, add_param);
  ElementMul(out, out, out, size);
  CompareOutputData(out, correct_out_ptr, size, 0.00001);
  ASSERT_EQ(0, CompareOutputData(out, correct_out_ptr, size, 0.00001));

  delete[] out;
  delete[] tile_data0;
@@ -581,7 +581,7 @@ TEST_F(TestArithmeticTestFp32, MulFp32) {
                                    2.547916, -3.8308315, -0.56281954, 9.992072, -1.8067529, 1.42546};
  auto correct_out_ptr = correct_out.data();

  CompareOutputData(output.data(), correct_out_ptr, 24, 0.00001);
  ASSERT_EQ(0, CompareOutputData(output.data(), correct_out_ptr, 24, 0.00001));

  input0_tensor.set_data(nullptr);
  input1_tensor.set_data(nullptr);
@@ -655,7 +655,7 @@ TEST_F(TestArithmeticTestFp32, MulReluFp32) {
                                    2.547916, 0, 0, 9.992072, 0, 1.42546};
  auto correct_out_ptr = correct_out.data();

  CompareOutputData(output.data(), correct_out_ptr, 24, 0.00001);
  ASSERT_EQ(0, CompareOutputData(output.data(), correct_out_ptr, 24, 0.00001));

  input0_tensor.set_data(nullptr);
  input1_tensor.set_data(nullptr);
@@ -728,7 +728,7 @@ TEST_F(TestArithmeticTestFp32, MulRelu6Fp32) {
                                    1.1281147, 0, 2.547916, 0, 0, 6, 0, 1.42546};
  auto correct_out_ptr = correct_out.data();

  CompareOutputData(output.data(), correct_out_ptr, 24, 0.00001);
  ASSERT_EQ(0, CompareOutputData(output.data(), correct_out_ptr, 24, 0.00001));

  input0_tensor.set_data(nullptr);
  input1_tensor.set_data(nullptr);
@@ -753,7 +753,7 @@ TEST_F(TestArithmeticTestFp32, MulInt0) {

  int correct_data[12] = {0, 2, 2, 9, 8, 5, 18, 14, 8, 27, 20, 11};

  CompareOutputData(out_data, correct_data, 12, err_tol);
  ASSERT_EQ(0, CompareOutputData(out_data, correct_data, 12, err_tol));
}

TEST_F(TestArithmeticTestFp32, MulInt1) {
@@ -774,7 +774,7 @@ TEST_F(TestArithmeticTestFp32, MulInt1) {

  int correct_data[12] = {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22};

  CompareOutputData(out_data, correct_data, 12, err_tol);
  ASSERT_EQ(0, CompareOutputData(out_data, correct_data, 12, err_tol));
}

TEST_F(TestArithmeticTestFp32, MulInt2) {
@@ -795,7 +795,7 @@ TEST_F(TestArithmeticTestFp32, MulInt2) {

  int correct_data[12] = {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22};

  CompareOutputData(out_data, correct_data, 12, err_tol);
  ASSERT_EQ(0, CompareOutputData(out_data, correct_data, 12, err_tol));
}

TEST_F(TestArithmeticTestFp32, MulInt3) {
@@ -816,7 +816,7 @@ TEST_F(TestArithmeticTestFp32, MulInt3) {

  int correct_data[12] = {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22};

  CompareOutputData(out_data, correct_data, 12, err_tol);
  ASSERT_EQ(0, CompareOutputData(out_data, correct_data, 12, err_tol));
}

TEST_F(TestArithmeticTestFp32, MulReluInt0) {
@@ -837,7 +837,7 @@ TEST_F(TestArithmeticTestFp32, MulReluInt0) {

  int correct_data[12] = {0, 1, 2, 0, 4, 5, 0, 7, 8, 0, 10, 11};

  CompareOutputData(out_data, correct_data, 12, err_tol);
  ASSERT_EQ(0, CompareOutputData(out_data, correct_data, 12, err_tol));
}

TEST_F(TestArithmeticTestFp32, MulReluInt1) {
@@ -858,7 +858,7 @@ TEST_F(TestArithmeticTestFp32, MulReluInt1) {

  int correct_data[12] = {0, 0, 0, 0, 0, 0, 6, 7, 8, 9, 10, 11};

  CompareOutputData(out_data, correct_data, 12, err_tol);
  ASSERT_EQ(0, CompareOutputData(out_data, correct_data, 12, err_tol));
}

TEST_F(TestArithmeticTestFp32, MulReluInt2) {
@@ -879,7 +879,7 @@ TEST_F(TestArithmeticTestFp32, MulReluInt2) {

  int correct_data[12] = {0, 0, 0, 0, 0, 0, 6, 7, 8, 9, 10, 11};

  CompareOutputData(out_data, correct_data, 12, err_tol);
  ASSERT_EQ(0, CompareOutputData(out_data, correct_data, 12, err_tol));
}

TEST_F(TestArithmeticTestFp32, MulReluInt3) {
@@ -900,7 +900,7 @@ TEST_F(TestArithmeticTestFp32, MulReluInt3) {

  int correct_data[12] = {0, 0, 0, 0, 0, 0, 6, 7, 8, 9, 10, 11};

  CompareOutputData(out_data, correct_data, 12, err_tol);
  ASSERT_EQ(0, CompareOutputData(out_data, correct_data, 12, err_tol));
}

TEST_F(TestArithmeticTestFp32, MulRelu6Int0) {
@@ -921,7 +921,7 @@ TEST_F(TestArithmeticTestFp32, MulRelu6Int0) {

  int correct_data[12] = {0, 1, 2, 0, 4, 5, 0, 6, 6, 0, 6, 6};

  CompareOutputData(out_data, correct_data, 12, err_tol);
  ASSERT_EQ(0, CompareOutputData(out_data, correct_data, 12, err_tol));
}

TEST_F(TestArithmeticTestFp32, MulRelu6Int1) {
@@ -942,7 +942,7 @@ TEST_F(TestArithmeticTestFp32, MulRelu6Int1) {

  int correct_data[12] = {0, 0, 0, 0, 0, 0, 6, 6, 6, 6, 6, 6};

  CompareOutputData(out_data, correct_data, 12, err_tol);
  ASSERT_EQ(0, CompareOutputData(out_data, correct_data, 12, err_tol));
}

TEST_F(TestArithmeticTestFp32, MulRelu6Int2) {
@@ -963,7 +963,7 @@ TEST_F(TestArithmeticTestFp32, MulRelu6Int2) {

  int correct_data[12] = {0, 0, 0, 0, 0, 0, 6, 6, 6, 6, 6, 6};

  CompareOutputData(out_data, correct_data, 12, err_tol);
  ASSERT_EQ(0, CompareOutputData(out_data, correct_data, 12, err_tol));
}

TEST_F(TestArithmeticTestFp32, MulRelu6Int3) {
@@ -984,7 +984,7 @@ TEST_F(TestArithmeticTestFp32, MulRelu6Int3) {

  int correct_data[12] = {0, 0, 0, 0, 0, 0, 6, 6, 6, 6, 6, 6};

  CompareOutputData(out_data, correct_data, 12, err_tol);
  ASSERT_EQ(0, CompareOutputData(out_data, correct_data, 12, err_tol));
}

TEST_F(TestArithmeticTestFp32, AddReluFp32) {
@@ -1053,7 +1053,7 @@ TEST_F(TestArithmeticTestFp32, AddReluFp32) {
                                    11.572254, 9.565813, 1.6258626, 7.629906, 0, 4.0682936, 0, 0, 13.641247, 0, 3.548678};
  auto correct_out_ptr = correct_out.data();

  CompareOutputData(output.data(), correct_out_ptr, 24, 0.00001);
  ASSERT_EQ(0, CompareOutputData(output.data(), correct_out_ptr, 24, 0.00001));

  input0_tensor.set_data(nullptr);
  input1_tensor.set_data(nullptr);
@@ -1125,7 +1125,7 @@ TEST_F(TestArithmeticTestFp32, AddRelu6Fp32) {
                                    0, 6, 6, 1.6258626, 6, 0, 4.0682936, 0, 0, 6, 0, 3.548678};
  auto correct_out_ptr = correct_out.data();

  CompareOutputData(output.data(), correct_out_ptr, 24, 0.00001);
  ASSERT_EQ(0, CompareOutputData(output.data(), correct_out_ptr, 24, 0.00001));

  input0_tensor.set_data(nullptr);
  input1_tensor.set_data(nullptr);
@@ -1199,7 +1199,7 @@ TEST_F(TestArithmeticTestFp32, DivReluFp32) {
                                    5.56195764, 0, 0, 0, 0, 0.71874648};
  auto correct_out_ptr = correct_out.data();

  CompareOutputData(output.data(), correct_out_ptr, 24, 0.00001);
  ASSERT_EQ(0, CompareOutputData(output.data(), correct_out_ptr, 24, 0.00001));

  input0_tensor.set_data(nullptr);
  input1_tensor.set_data(nullptr);
@@ -1271,7 +1271,7 @@ TEST_F(TestArithmeticTestFp32, DivRelu6Fp32) {
                                    0, 0, 6, 0.28698101, 4.01059523, 0.53567243, 5.56195764, 0, 0, 0, 0, 0.71874648};
  auto correct_out_ptr = correct_out.data();

  CompareOutputData(output.data(), correct_out_ptr, 24, 0.00001);
  ASSERT_EQ(0, CompareOutputData(output.data(), correct_out_ptr, 24, 0.00001));

  input0_tensor.set_data(nullptr);
  input1_tensor.set_data(nullptr);
@@ -1341,7 +1341,7 @@ TEST_F(TestArithmeticTestFp32, EqualFp32) {
  std::vector<float> correct_out = {0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1};
  auto correct_out_ptr = correct_out.data();

  CompareOutputData(output.data(), correct_out_ptr, 24, 0.00001);
  ASSERT_EQ(0, CompareOutputData(output.data(), correct_out_ptr, 24, 0.00001));

  input0_tensor.set_data(nullptr);
  input1_tensor.set_data(nullptr);
@@ -39,7 +39,7 @@ TEST_F(BatchToSpaceTestFp32, BatchToSpaceTest1) {
    std::cout << output[i] << " ";
  }
  std::cout << "\n";
  CompareOutputData(output, expect_out, kOutSize, 0.000001);
  ASSERT_EQ(0, CompareOutputData(output, expect_out, kOutSize, 0.000001));
}

TEST_F(BatchToSpaceTestFp32, BatchToSpaceTest_crop_1) {
@@ -57,7 +57,7 @@ TEST_F(BatchToSpaceTestFp32, BatchToSpaceTest_crop_1) {
    std::cout << output[i] << " ";
  }
  std::cout << "\n";
  CompareOutputData(output, expect_out, kOutSize, 0.000001);
  ASSERT_EQ(0, CompareOutputData(output, expect_out, kOutSize, 0.000001));
}

TEST_F(BatchToSpaceTestFp32, BatchToSpaceTest2) {
@@ -76,7 +76,7 @@ TEST_F(BatchToSpaceTestFp32, BatchToSpaceTest2) {
    std::cout << output[i] << " ";
  }
  std::cout << "\n";
  CompareOutputData(output, expect_out, kOutSize, 0.000001);
  ASSERT_EQ(0, CompareOutputData(output, expect_out, kOutSize, 0.000001));
}

TEST_F(BatchToSpaceTestFp32, BatchToSpaceTest_crop_2) {
@@ -95,7 +95,7 @@ TEST_F(BatchToSpaceTestFp32, BatchToSpaceTest_crop_2) {
    std::cout << output[i] << " ";
  }
  std::cout << "\n";
  CompareOutputData(output, expect_out, kOutSize, 0.000001);
  ASSERT_EQ(0, CompareOutputData(output, expect_out, kOutSize, 0.000001));
}

TEST_F(BatchToSpaceTestFp32, BatchToSpaceTest3) {
@@ -118,7 +118,7 @@ TEST_F(BatchToSpaceTestFp32, BatchToSpaceTest3) {
    std::cout << output[i] << " ";
  }
  std::cout << "\n";
  CompareOutputData(output, expect_out, kOutSize, 0.000001);
  ASSERT_EQ(0, CompareOutputData(output, expect_out, kOutSize, 0.000001));
}

TEST_F(BatchToSpaceTestFp32, BatchToSpaceTest_crop_3) {
@@ -139,7 +139,7 @@ TEST_F(BatchToSpaceTestFp32, BatchToSpaceTest_crop_3) {
    std::cout << output[i] << " ";
  }
  std::cout << "\n";
  CompareOutputData(output, expect_out, kOutSize, 0.000001);
  ASSERT_EQ(0, CompareOutputData(output, expect_out, kOutSize, 0.000001));
}

TEST_F(BatchToSpaceTestFp32, BatchToSpaceTest4) {
@@ -164,7 +164,7 @@ TEST_F(BatchToSpaceTestFp32, BatchToSpaceTest4) {
    std::cout << output[i] << " ";
  }
  std::cout << "\n";
  CompareOutputData(output, expect_out, kOutSize, 0.000001);
  ASSERT_EQ(0, CompareOutputData(output, expect_out, kOutSize, 0.000001));
}

TEST_F(BatchToSpaceTestFp32, BatchToSpaceTest_crop_4) {
@@ -187,7 +187,7 @@ TEST_F(BatchToSpaceTestFp32, BatchToSpaceTest_crop_4) {
    std::cout << output[i] << " ";
  }
  std::cout << "\n";
  CompareOutputData(output, expect_out, kOutSize, 0.000001);
  ASSERT_EQ(0, CompareOutputData(output, expect_out, kOutSize, 0.000001));
}

}  // namespace mindspore
@@ -69,7 +69,7 @@ TEST_F(TestBatchnormFp32, BNTest) {
    std::cout << output[i] << " ,";
  }
  std::cout << std::endl;
  CompareOutputData(output.data(), corr_out.data(), output0_tensor.ElementsNum(), 0.001);
  ASSERT_EQ(0, CompareOutputData(output.data(), corr_out.data(), output0_tensor.ElementsNum(), 0.001));

  input0_tensor.set_data(nullptr);
  input1_tensor.set_data(nullptr);
@@ -125,7 +125,7 @@ TEST_F(TestBatchnormFp32, FusedBNTest) {
    std::cout << output[i] << " ,";
  }
  std::cout << std::endl;
  CompareOutputData(output.data(), corr_out.data(), output0.ElementsNum(), 0.001);
  ASSERT_EQ(0, CompareOutputData(output.data(), corr_out.data(), output0.ElementsNum(), 0.001));

  input0.set_data(nullptr);
  input1.set_data(nullptr);
@@ -176,7 +176,7 @@ TEST_F(TestBatchnormFp32, easyTest) {
    std::cout << output[i] << " ,";
  }
  std::cout << std::endl;
  CompareOutputData(output.data(), corr_out.data(), output0.ElementsNum(), 0.001);
  ASSERT_EQ(0, CompareOutputData(output.data(), corr_out.data(), output0.ElementsNum(), 0.001));

  input0.set_data(nullptr);
  input1.set_data(nullptr);

@@ -63,7 +63,7 @@ TEST_F(TestConstantOfShapeFp32, Simple) {
  float *output = reinterpret_cast<float *>(outputs_[0]->MutableData());
  for (int i = 0; i < 8; ++i) printf("%f ", output[i]);
  printf("\n");
  CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001);
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001));
  delete op;
  for (auto t : inputs_) delete t;
  for (auto t : outputs_) delete t;
@@ -27,7 +27,7 @@ using mindspore::lite::Tensor;

class TestConv1x1Fp32 : public mindspore::CommonTest {
 public:
  TestConv1x1Fp32() {}
  TestConv1x1Fp32() = default;
};

TEST_F(TestConv1x1Fp32, Input1x1PrePack1) {
@@ -54,7 +54,7 @@ TEST_F(TestConv1x1Fp32, Input1x1PrePack1) {

  float out[20] = {0};
  Conv1x1InputPack(in, out, conv_param, sizeof(float));
  EXPECT_EQ(0, lite::CompareOutputData(out, 20, correct, 20));
  EXPECT_EQ(0, CompareOutputData(out, correct, 20));
  delete conv_param;
}

@@ -95,7 +95,7 @@ TEST_F(TestConv1x1Fp32, Input1x1PrePack2) {

  float out[28] = {0};
  Conv1x1InputPack(in, out, conv_param, sizeof(float));
  CompareOutputData(out, correct, 28, 0.0001);
  ASSERT_EQ(0, CompareOutputData(out, correct, 28, 0.0001));
  delete conv_param;
}

@@ -114,7 +114,7 @@ TEST_F(TestConv1x1Fp32, Input1x1PrePack3) {
                     -5.052577, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};

  Conv1x1InputPack(in, out, conv_param, sizeof(float));
  EXPECT_EQ(0, lite::CompareOutputData(out, 18, correct, 18));
  EXPECT_EQ(0, CompareOutputData(out, correct, 18));
  delete conv_param;
}

@@ -136,12 +136,12 @@ TEST_F(TestConv1x1Fp32, Input1x1PrePack4) {
                     0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
  float out[54] = {0};
  Conv1x1InputPack(in, out, conv_param, sizeof(float));
  EXPECT_EQ(0, lite::CompareOutputData(out, 54, correct, 54));
  EXPECT_EQ(0, CompareOutputData(out, correct, 54));
  delete conv_param;
}

TEST_F(TestConv1x1Fp32, Conv1x1WeightTest1) {
  ConvParameter *conv_param = new ConvParameter();
  auto *conv_param = new ConvParameter();
  float in[] = {0.214637, 0.3815, 0.811557, 0.982146, 0.09123, 0.687198, 0.02742, 0.3360, 0.853275,
                0.674123, 0.81337, 0.57188, 0.706416, 0.2740942, 0.9045, 0.07155, 0.130864, 0.037712,
                0.5369175, 0.97283, 0.92133, 0.3588165, 0.7432479, 0.7886823, 0.870324, 0.230946, 0.343969,
@@ -166,13 +166,13 @@ TEST_F(TestConv1x1Fp32, Conv1x1WeightTest1) {
  conv_param->output_channel_ = 7;
  float out[96] = {0};
  Pack1x1WeightFp32(in, out, conv_param);
  EXPECT_EQ(0, lite::CompareOutputData(out, 96, co, 96));
  EXPECT_EQ(0, CompareOutputData(out, co, 96));
  delete conv_param;
}

int Conv1x1TestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_,
                     ConvParameter *conv_param, float **correct) {
  lite::Tensor *in_t = new lite::Tensor(kNumberTypeFloat, {1, 2, 3, 4}, schema::Format_NHWC, lite::Tensor::VAR);
  auto *in_t = new lite::Tensor(kNumberTypeFloat, {1, 2, 3, 4}, schema::Format_NHWC, lite::Tensor::VAR);
  in_t->MallocData();
  float in[] = {12.216284, 3.3466918, 15.327419, 5.234958, 0.804376, 9.952188, 14.727955, -8.080715,
                13.71383, 8.055829, 6.5845337, -9.25232, -4.24519, 11.550042, 9.262012, 1.2780352,
@@ -180,21 +180,20 @@ int Conv1x1TestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Ten
  memcpy(in_t->MutableData(), in, sizeof(float) * 24);
  inputs_->push_back(in_t);

  lite::Tensor *weight_t =
    new lite::Tensor(kNumberTypeFloat, {3, 1, 1, 4}, schema::Format_NHWC, lite::Tensor::CONST_TENSOR);
  auto *weight_t = new lite::Tensor(kNumberTypeFloat, {3, 1, 1, 4}, schema::Format_NHWC, lite::Tensor::CONST_TENSOR);
  weight_t->MallocData();
  float weight[] = {-0.7308652, 0.5257509, -0.87825793, -1.123181, -1.2206168, 0.562695,
                    1.5382664, -0.5020635, 0.8591602, -0.26410004, 1.1262615, 0.073132955}; /* nhwc */
  memcpy(weight_t->MutableData(), weight, sizeof(float) * 12);
  inputs_->push_back(weight_t);

  lite::Tensor *bias_t = new lite::Tensor(kNumberTypeFloat, {3}, schema::Format_NHWC, lite::Tensor::CONST_TENSOR);
  auto *bias_t = new lite::Tensor(kNumberTypeFloat, {3}, schema::Format_NHWC, lite::Tensor::CONST_TENSOR);
  bias_t->MallocData();
  float bias[] = {2, 2, 2};
  memcpy(bias_t->MutableData(), bias, sizeof(float) * 3);
  inputs_->push_back(bias_t);

  lite::Tensor *out_t = new lite::Tensor(kNumberTypeFloat, {1, 2, 3, 3}, schema::Format_NHWC, lite::Tensor::VAR);
  auto *out_t = new lite::Tensor(kNumberTypeFloat, {1, 2, 3, 3}, schema::Format_NHWC, lite::Tensor::VAR);
  out_t->MallocData();
  outputs_->push_back(out_t);

@@ -214,18 +213,18 @@ TEST_F(TestConv1x1Fp32, Conv1x1Test1) {
  std::vector<lite::Tensor *> inputs_;
  std::vector<lite::Tensor *> outputs_;
  auto conv_param = new ConvParameter();
  lite::InnerContext *ctx = new lite::InnerContext();
  auto *ctx = new lite::InnerContext();
  ctx->thread_num_ = 1;
  ASSERT_EQ(lite::RET_OK, ctx->Init());
  float *correct;
  int total_size = Conv1x1TestInit1(&inputs_, &outputs_, conv_param, &correct);
  kernel::Convolution1x1CPUKernel *conv1x1 =
  auto *conv1x1 =
    new kernel::Convolution1x1CPUKernel(reinterpret_cast<OpParameter *>(conv_param), inputs_, outputs_, ctx, nullptr);

  conv1x1->Init();
  conv1x1->Run();

  CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001);
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001));
  delete conv_param;
  delete conv1x1;
  for (auto t : inputs_) delete t;
@@ -236,29 +235,28 @@ TEST_F(TestConv1x1Fp32, Conv1x1Test1) {
int Conv1x1TestInit2(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_,
                     ConvParameter *conv_param, float **correct) {
  size_t buffer_size;
  lite::Tensor *in_t = new lite::Tensor(kNumberTypeFloat, {1, 300, 300, 24}, schema::Format_NHWC, lite::Tensor::VAR);
  auto *in_t = new lite::Tensor(kNumberTypeFloat, {1, 300, 300, 24}, schema::Format_NHWC, lite::Tensor::VAR);
  in_t->MallocData();
  std::string input_path = "./conv/conv1x1fp32_input1_nhwc.bin";
  auto in = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &buffer_size));
  memcpy(in_t->MutableData(), in, buffer_size);
  inputs_->push_back(in_t);

  lite::Tensor *weight_t =
    new lite::Tensor(kNumberTypeFloat, {40, 1, 1, 24}, schema::Format_NHWC, lite::Tensor::CONST_TENSOR);
  auto *weight_t = new lite::Tensor(kNumberTypeFloat, {40, 1, 1, 24}, schema::Format_NHWC, lite::Tensor::CONST_TENSOR);
  weight_t->MallocData();
  std::string weight_path = "./conv/conv1x1fp32_weight1_nhwc.bin";
  auto weight = reinterpret_cast<float *>(mindspore::lite::ReadFile(weight_path.c_str(), &buffer_size));
  memcpy(weight_t->MutableData(), weight, buffer_size);
  inputs_->push_back(weight_t);

  lite::Tensor *bias_t = new lite::Tensor(kNumberTypeFloat, {40}, schema::Format_NHWC, lite::Tensor::CONST_TENSOR);
  auto *bias_t = new lite::Tensor(kNumberTypeFloat, {40}, schema::Format_NHWC, lite::Tensor::CONST_TENSOR);
  bias_t->MallocData();
  std::string bias_path = "./conv/conv1x1fp32_bias1_nhwc.bin";
  auto bias = mindspore::lite::ReadFile(bias_path.c_str(), &buffer_size);
  memcpy(bias_t->MutableData(), bias, buffer_size);
  inputs_->push_back(bias_t);

  lite::Tensor *out_t = new lite::Tensor(kNumberTypeFloat, {1, 300, 300, 40}, schema::Format_NHWC, lite::Tensor::VAR);
  auto *out_t = new lite::Tensor(kNumberTypeFloat, {1, 300, 300, 40}, schema::Format_NHWC, lite::Tensor::VAR);
  out_t->MallocData();
  outputs_->push_back(out_t);

@@ -279,17 +277,17 @@ TEST_F(TestConv1x1Fp32, Conv1x1Test2) {
  std::vector<lite::Tensor *> inputs_;
  std::vector<lite::Tensor *> outputs_;
  auto conv_param = new ConvParameter();
  lite::InnerContext *ctx = new lite::InnerContext();
  auto *ctx = new lite::InnerContext();
  ctx->thread_num_ = 2;
  ASSERT_EQ(lite::RET_OK, ctx->Init());
  float *correct;
  int total_size = Conv1x1TestInit2(&inputs_, &outputs_, conv_param, &correct);
  kernel::Convolution1x1CPUKernel *conv1x1 =
  auto *conv1x1 =
    new kernel::Convolution1x1CPUKernel(reinterpret_cast<OpParameter *>(conv_param), inputs_, outputs_, ctx, nullptr);

  conv1x1->Init();
  conv1x1->Run();
  CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001);
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001));

  /* running warm up */
  for (int i = 0; i < 0; i++) {
@@ -135,14 +135,14 @@ TEST_F(TestConvolutionDwFp32, ConvDwFp32Accuracy) {
  auto correct_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(output_path.c_str(), &output_size));

  // compare
  CompareOutputData(output_ptr, correct_data, outputs[0]->ElementsNum(), 0.0001);
  ASSERT_EQ(0, CompareOutputData(output_ptr, correct_data, outputs[0]->ElementsNum(), 0.0001));

  delete conv_param;
  for (unsigned int i = 0; i < inputs.size(); i++) {
    delete inputs[i];
  for (auto &input : inputs) {
    delete input;
  }
  for (unsigned int i = 0; i < outputs.size(); i++) {
    delete outputs[i];
  for (auto &output : outputs) {
    delete output;
  }
  delete kernel;
  delete correct_data;
@@ -42,7 +42,7 @@ TEST_F(CropTestFp32, CropTest1) {
    std::cout << output[i] << " ";
  }
  std::cout << "\n";
  CompareOutputData(output, expect_out, kOutSize, 0.000001);
  ASSERT_EQ(0, CompareOutputData(output, expect_out, kOutSize, 0.000001));
}

TEST_F(CropTestFp32, CropTest2) {
@@ -65,7 +65,7 @@ TEST_F(CropTestFp32, CropTest2) {
    std::cout << output[i] << " ";
  }
  std::cout << "\n";
  CompareOutputData(output, expect_out, kOutSize, 0.000001);
  ASSERT_EQ(0, CompareOutputData(output, expect_out, kOutSize, 0.000001));
}

TEST_F(CropTestFp32, CropTest3) {
@@ -85,7 +85,7 @@ TEST_F(CropTestFp32, CropTest3) {
    std::cout << output[i] << " ";
  }
  std::cout << "\n";
  CompareOutputData(output, expect_out, kOutSize, 0.000001);
  ASSERT_EQ(0, CompareOutputData(output, expect_out, kOutSize, 0.000001));
}

TEST_F(CropTestFp32, CropTest4) {
@@ -106,7 +106,7 @@ TEST_F(CropTestFp32, CropTest4) {
    std::cout << output[i] << " ";
  }
  std::cout << "\n";
  CompareOutputData(output, expect_out, kOutSize, 0.000001);
  ASSERT_EQ(0, CompareOutputData(output, expect_out, kOutSize, 0.000001));
}

TEST_F(CropTestFp32, CropTest5) {
@@ -127,7 +127,7 @@ TEST_F(CropTestFp32, CropTest5) {
    std::cout << output[i] << " ";
  }
  std::cout << "\n";
  CompareOutputData(output, expect_out, kOutSize, 0.000001);
  ASSERT_EQ(0, CompareOutputData(output, expect_out, kOutSize, 0.000001));
}

TEST_F(CropTestFp32, CropTest6) {
@@ -145,11 +145,11 @@ TEST_F(CropTestFp32, CropTest6) {
  crop_param.offset_[2] = 0;
  crop_param.offset_[3] = 0;
  Crop4DNoParallel(input, output, in_shape, out_shape, &crop_param);
  for (int i = 0; i < kOutSize; ++i) {
    std::cout << output[i] << " ";
  for (float i : output) {
    std::cout << i << " ";
  }
  std::cout << "\n";
  CompareOutputData(output, expect_out, kOutSize, 0.000001);
  ASSERT_EQ(0, CompareOutputData(output, expect_out, kOutSize, 0.000001));
}

TEST_F(CropTestFp32, CropTest7) {
@@ -164,11 +164,11 @@ TEST_F(CropTestFp32, CropTest7) {
  crop_param.axis_ = 3;
  crop_param.offset_[0] = 1;
  Crop4DNoParallel(input, output, in_shape, out_shape, &crop_param);
  for (int i = 0; i < kOutSize; ++i) {
    std::cout << output[i] << " ";
  for (float i : output) {
    std::cout << i << " ";
  }
  std::cout << "\n";
  CompareOutputData(output, expect_out, kOutSize, 0.000001);
  ASSERT_EQ(0, CompareOutputData(output, expect_out, kOutSize, 0.000001));
}

TEST_F(CropTestFp32, CropTest8) {
@@ -187,11 +187,11 @@ TEST_F(CropTestFp32, CropTest8) {
  crop_param.op_parameter_.thread_num_ = 2;
  Crop4D(input, output, in_shape, out_shape, &crop_param, 0);
  Crop4D(input, output, in_shape, out_shape, &crop_param, 1);
  for (int i = 0; i < kOutSize; ++i) {
    std::cout << output[i] << " ";
  for (float i : output) {
    std::cout << i << " ";
  }
  std::cout << "\n";
  CompareOutputData(output, expect_out, kOutSize, 0.000001);
  ASSERT_EQ(0, CompareOutputData(output, expect_out, kOutSize, 0.000001));
}

TEST_F(CropTestFp32, CropTest9) {
@@ -213,11 +213,11 @@ TEST_F(CropTestFp32, CropTest9) {
  crop_param.op_parameter_.thread_num_ = 2;
  Crop4D(input, output, in_shape, out_shape, &crop_param, 0);
  Crop4D(input, output, in_shape, out_shape, &crop_param, 1);
  for (int i = 0; i < kOutSize; ++i) {
    std::cout << output[i] << " ";
  for (float i : output) {
    std::cout << i << " ";
  }
  std::cout << "\n";
  CompareOutputData(output, expect_out, kOutSize, 0.000001);
  ASSERT_EQ(0, CompareOutputData(output, expect_out, kOutSize, 0.000001));
}

TEST_F(CropTestFp32, CropTest10) {
@@ -237,11 +237,11 @@ TEST_F(CropTestFp32, CropTest10) {
  crop_param.op_parameter_.thread_num_ = 2;
  Crop4D(input, output, in_shape, out_shape, &crop_param, 1);
  Crop4D(input, output, in_shape, out_shape, &crop_param, 0);
  for (int i = 0; i < kOutSize; ++i) {
    std::cout << output[i] << " ";
  for (float i : output) {
    std::cout << i << " ";
  }
  std::cout << "\n";
  CompareOutputData(output, expect_out, kOutSize, 0.000001);
  ASSERT_EQ(0, CompareOutputData(output, expect_out, kOutSize, 0.000001));
}

TEST_F(CropTestFp32, CropTest11) {
@@ -277,11 +277,11 @@ TEST_F(CropTestFp32, CropTest11) {
  kernel->Init();
  kernel->Run();

  float *output = reinterpret_cast<float *>(outputs[0]->MutableData());
  auto *output = reinterpret_cast<float *>(outputs[0]->MutableData());
  for (int i = 0; i < kOutSize; ++i) {
    std::cout << output[i] << " ";
  }
  std::cout << "\n";
  CompareOutputData(output, expect_out, kOutSize, 0.000001);
  ASSERT_EQ(0, CompareOutputData(output, expect_out, kOutSize, 0.000001));
}
}  // namespace mindspore
@ -25,7 +25,7 @@
namespace mindspore {
class TestDeConvolutionFp32 : public mindspore::CommonTest {
public:
TestDeConvolutionFp32() {}
TestDeConvolutionFp32() = default;
};

TEST_F(TestDeConvolutionFp32, DeConvWeightC4x4Pack1) {
@ -76,7 +76,7 @@ TEST_F(TestDeConvolutionFp32, DeConvWeightC4x4Pack1) {
0.000, 0.000, 0.000, 0.00};
float dst[256] = {0};
PackDeConvWeightFp32(in, dst, 5, 6, 2 * 2);
EXPECT_EQ(0, lite::CompareOutputData(dst, 256, co, 256));
EXPECT_EQ(0, CompareOutputData(dst, co, 256));
}

TEST_F(TestDeConvolutionFp32, DeConvWeightC4x4Pack2) {
@ -91,7 +91,7 @@ TEST_F(TestDeConvolutionFp32, DeConvWeightC4x4Pack2) {
-0.293, 18.686, 0.0873, 0, 0, 0, 0, 0, 0, 0, 0, 0};
float dst[64] = {0};
PackDeConvWeightFp32(in, dst, 6, 3, 2 * 1);
EXPECT_EQ(0, lite::CompareOutputData(dst, 64, co, 64));
EXPECT_EQ(0, CompareOutputData(dst, co, 64));
}

TEST_F(TestDeConvolutionFp32, PostConvFuncC8Test1) {
@ -108,15 +108,15 @@ TEST_F(TestDeConvolutionFp32, PostConvFuncC8Test1) {

float no[] = {-8.646674, -4.7133026, -0.11849791, -4.530405, -5.419181, 14.387108, 2.8319538, -8.511095};
PostConvFuncFp32C8(in, out, bias, 1, 8, 1, ActType_No);
CompareOutputData(out, no, 8, 0.0001);
ASSERT_EQ(0, CompareOutputData(out, no, 8, 0.0001));

float relu[] = {0, 0, 0, 0, 0, 14.387108, 2.8319538, 0};
PostConvFuncFp32C8(in, out, bias, 1, 8, 1, ActType_Relu);
CompareOutputData(out, relu, 8, 0.0001);
ASSERT_EQ(0, CompareOutputData(out, relu, 8, 0.0001));

float corr_relu6[] = {0, 0, 0, 0, 0, 6, 2.8319538, 0};
PostConvFuncFp32C8(in, out, bias, 1, 8, 1, ActType_Relu6);
CompareOutputData(out, corr_relu6, 8, 0.0001);
ASSERT_EQ(0, CompareOutputData(out, corr_relu6, 8, 0.0001));
}

TEST_F(TestDeConvolutionFp32, PostConvFuncC8Test2) {
@ -134,15 +134,15 @@ TEST_F(TestDeConvolutionFp32, PostConvFuncC8Test2) {
float no[] = {-8.646674, 0, -4.7133026, 0, -0.11849791, 0, -4.530405, 0,
-5.419181, 0, 14.387108, 0, 2.8319538, 0, -8.511095, 0};
PostConvFuncFp32C8(in, out, bias, 1, 8, 2, ActType_No);
CompareOutputData(out, no, 16, 0.0001);
ASSERT_EQ(0, CompareOutputData(out, no, 16, 0.0001));

float relu[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14.387108, 0, 2.8319538, 0, 0, 0};
PostConvFuncFp32C8(in, out, bias, 1, 8, 2, ActType_Relu);
CompareOutputData(out, relu, 16, 0.0001);
ASSERT_EQ(0, CompareOutputData(out, relu, 16, 0.0001));

float corr_relu6[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 2.8319538, 0, 0, 0};
PostConvFuncFp32C8(in, out, bias, 1, 8, 2, ActType_Relu6);
CompareOutputData(out, corr_relu6, 16, 0.0001);
ASSERT_EQ(0, CompareOutputData(out, corr_relu6, 16, 0.0001));
}

TEST_F(TestDeConvolutionFp32, PostConvFuncC8Test3) {
@ -161,7 +161,7 @@ TEST_F(TestDeConvolutionFp32, PostConvFuncC8Test3) {
11.90631, -4.530405, -0.47735345, -3.7422307, -5.419181, -0.14518678, -8.15199, 14.387108,
8.693133, 8.080041, 2.8319538, 7.177942, -4.409286, -8.511095, -5.110127, -4.992582};
PostConvFuncFp32C8(in, out, bias, 3, 8, 3, ActType_No);
CompareOutputData(out, no, 24, 0.0001);
ASSERT_EQ(0, CompareOutputData(out, no, 24, 0.0001));
}

TEST_F(TestDeConvolutionFp32, PostConvFuncC8Test4) {
@ -179,12 +179,12 @@ TEST_F(TestDeConvolutionFp32, PostConvFuncC8Test4) {
float co32[] = {0, 0, 0, 0, 0, 1.2270198, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 14.387108, 8.693133, 0, 0, 2.8319538, 7.177942, 0, 0, 0, 0, 0, 0};
PostConvFuncFp32C8(in, out, bias, 2, 8, 4, ActType_Relu);
CompareOutputData(out, co32, 32, 0.0001);
ASSERT_EQ(0, CompareOutputData(out, co32, 32, 0.0001));

float co32_relu6[] = {0, 0, 6, 0, 0, 1.2270198, 6, 6, 0, 0, 6, 0.3088621, 0, 0, 0, 0,
0, 0, 0, 6, 6, 6, 6, 0, 2.8319538, 6, 0, 6, 0, 0, 0, 0};
PostConvFuncFp32C8(in, out, bias, 4, 8, 4, ActType_Relu6);
CompareOutputData(out, co32_relu6, 32, 0.0001);
ASSERT_EQ(0, CompareOutputData(out, co32_relu6, 32, 0.0001));
}

TEST_F(TestDeConvolutionFp32, PostConvFuncC8Test5) {
@ -205,19 +205,19 @@ TEST_F(TestDeConvolutionFp32, PostConvFuncC8Test5) {
-8.334226, 14.387108, 8.693133, 8.080041, -0.30434704, -3.782834, 2.8319538, 7.177942,
-4.409286, 12.194644, -7.0295477, -8.511095, -5.110127, -4.992582, -0.31387085, -2.7594402};
PostConvFuncFp32C8(in, out, bias, 5, 8, 5, ActType_No);
CompareOutputData(out, no, 40, 0.0001);
ASSERT_EQ(0, CompareOutputData(out, no, 40, 0.0001));

float relu[] = {0, 0, 8.56133, 0, 0, 0, 1.2270198, 17.954533, 11.086085, 0,
0, 0, 11.90631, 0.3088621, 11.196218, 0, 0, 0, 0, 0,
0, 0, 0, 9.464027, 0, 14.387108, 8.693133, 8.080041, 0, 0,
2.8319538, 7.177942, 0, 12.194644, 0, 0, 0, 0, 0, 0};
PostConvFuncFp32C8(in, out, bias, 5, 8, 5, ActType_Relu);
CompareOutputData(out, relu, 40, 0.0001);
ASSERT_EQ(0, CompareOutputData(out, relu, 40, 0.0001));

float corr_relu6[] = {0, 0, 6, 0, 0, 0, 1.2270198, 6, 6, 0, 0, 0, 6, 0.3088621, 6, 0, 0, 0, 0, 0,
0, 0, 0, 6, 0, 6, 6, 6, 0, 0, 2.8319538, 6, 0, 6, 0, 0, 0, 0, 0, 0};
PostConvFuncFp32C8(in, out, bias, 5, 8, 5, ActType_Relu6);
CompareOutputData(out, corr_relu6, 40, 0.0001);
ASSERT_EQ(0, CompareOutputData(out, corr_relu6, 40, 0.0001));
}

TEST_F(TestDeConvolutionFp32, PostConvFuncC8Test6) {
@ -231,13 +231,13 @@ TEST_F(TestDeConvolutionFp32, PostConvFuncC8Test6) {
float no_3[] = {-9.389655, -5.83877, 7.5724425, 0, 0, 0, -0.8614793, -4.404605, 10.917422, 0, 0, 0,
-6.1621623, -0.6315082, -9.140878, 0, 0, 0, 2.0889723, 6.6916203, -5.3981733, 0, 0, 0};
PostConvFuncFp32C8(in, out, bias, 3, 4, 6, ActType_No);
CompareOutputData(out, no_3, 24, 0.0001);
ASSERT_EQ(0, CompareOutputData(out, no_3, 24, 0.0001));

float no_6[] = {-9.389655, -5.83877, 7.5724425, -1.4675674, -5.456284, 0.7406984, -0.8614793, -4.404605,
10.917422, 0.11158327, -5.2733865, -0.96367484, -6.1621623, -0.6315082, -9.140878, 9.266748,
13.644127, 8.206812, 2.0889723, 6.6916203, -5.3981733, 11.997365, -9.254076, -5.5964484};
PostConvFuncFp32C8(in, out, bias, 6, 4, 6, ActType_No);
CompareOutputData(out, no_6, 24, 0.0001);
ASSERT_EQ(0, CompareOutputData(out, no_6, 24, 0.0001));
}

TEST_F(TestDeConvolutionFp32, PostConvFuncC8Test7) {
@ -253,7 +253,7 @@ TEST_F(TestDeConvolutionFp32, PostConvFuncC8Test7) {
-6.1621623, -0.6315082, -9.140878, 9.266748, 13.644127, 8.206812, 7.091153,
2.0889723, 6.6916203, -5.3981733, 11.997365, -9.254076, -5.5964484, -5.981469};
PostConvFuncFp32C8(in, out, bias, 7, 4, 7, ActType_No);
CompareOutputData(out, no, 28, 0.0001);
ASSERT_EQ(0, CompareOutputData(out, no, 28, 0.0001));
}

TEST_F(TestDeConvolutionFp32, PostConvFuncC8Test8_2) {
@ -269,7 +269,7 @@ TEST_F(TestDeConvolutionFp32, PostConvFuncC8Test8_2) {
-0.8614793, -4.404605, 10.917422, 0.11158327, -5.2733865, -0.96367484, -4.731118, -7.576815,
2.0889723, 6.6916203, -5.3981733, 11.997365, -9.254076, -5.5964484, -5.981469, -0.51114964};
PostConvFuncFp32C8(in, out, bias, 16, 2, 16, ActType_No);
CompareOutputData(out, no, 28, 0.0001);
ASSERT_EQ(0, CompareOutputData(out, no, 28, 0.0001));
}

TEST_F(TestDeConvolutionFp32, PostConvFuncC8Test8_4) {
@ -293,7 +293,7 @@ TEST_F(TestDeConvolutionFp32, PostConvFuncC8Test8_4) {
2.0889723, 6.6916203, -5.3981733, 11.997365, -9.254076, -5.5964484, -5.981469, -0.51114964,
2.0889723, 6.6916203, -5.3981733, 11.997365, -9.254076, -5.5964484, -5.981469, -0.51114964};
PostConvFuncFp32C8(in, out, bias, 16, 4, 16, ActType_No);
CompareOutputData(out, no, 64, 0.0001);
ASSERT_EQ(0, CompareOutputData(out, no, 64, 0.0001));
}

TEST_F(TestDeConvolutionFp32, PostConvFuncC8Test8_8) {
@ -317,13 +317,13 @@ TEST_F(TestDeConvolutionFp32, PostConvFuncC8Test8_8) {
-6.1621623, -0.6315082, -9.140878, 9.266748, 13.644127, 8.206812, 7.091153, -0.50162584,
2.0889723, 6.6916203, -5.3981733, 11.997365, -9.254076, -5.5964484, -5.981469, -0.51114964};
PostConvFuncFp32C8(in, out, bias, 8, 8, 8, ActType_No);
CompareOutputData(out, no, 64, 0.0001);
ASSERT_EQ(0, CompareOutputData(out, no, 64, 0.0001));
}
int DeConvTestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_,
ConvParameter *conv_param, float **correct) {
std::vector<int> in_dims_nhwc = {1, 5, 7, 2};
lite::Tensor *in_t =
auto *in_t =
new lite::Tensor(kNumberTypeFloat, in_dims_nhwc, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
in_t->MallocData();
float in_nchw[] = {

@ -339,7 +339,7 @@ int DeConvTestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tens
inputs_->push_back(in_t);

std::vector<int> weight_dims_nhwc = {2, 3, 3, 6};
lite::Tensor *weight_t =
auto *weight_t =
new lite::Tensor(kNumberTypeFloat, weight_dims_nhwc, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
weight_t->MallocData();
float weight_nchw[] = {

@ -361,15 +361,14 @@ int DeConvTestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tens
weight_t->Channel());
inputs_->push_back(weight_t);

lite::Tensor *bias_t =
new lite::Tensor(kNumberTypeFloat, {6}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto *bias_t = new lite::Tensor(kNumberTypeFloat, {6}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
bias_t->MallocData();
float bias[] = {-0.19064677, -0.0034778118, 0.63741624, -1.0311537, -1.0288948, 0.71384084};
memcpy(bias_t->MutableData(), bias, sizeof(float) * 6);
inputs_->push_back(bias_t);

std::vector<int> output_nhwc_dims = {1, 9, 13, 6};
lite::Tensor *out_t =
auto *out_t =
new lite::Tensor(kNumberTypeFloat, output_nhwc_dims, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
out_t->MallocData();
outputs_->push_back(out_t);

@ -476,19 +475,19 @@ int DeConvTestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tens
TEST_F(TestDeConvolutionFp32, DeConvTest1) {
std::vector<lite::Tensor *> inputs_;
std::vector<lite::Tensor *> outputs_;
ConvParameter *deconv_param = new ConvParameter();
lite::InnerContext *ctx = new lite::InnerContext;
auto *deconv_param = new ConvParameter();
auto *ctx = new lite::InnerContext;
ctx->thread_num_ = 1;
ASSERT_EQ(lite::RET_OK, ctx->Init());
float *correct;
int total_size = DeConvTestInit1(&inputs_, &outputs_, deconv_param, &correct);
kernel::DeConvolutionCPUKernel *deconv =
auto *deconv =
new kernel::DeConvolutionCPUKernel(reinterpret_cast<OpParameter *>(deconv_param), inputs_, outputs_, ctx, nullptr);

deconv->Init();
deconv->Run();

CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001);
ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001));
delete deconv_param;
delete deconv;
for (auto t : inputs_) delete t;

@ -547,15 +546,15 @@ TEST_F(TestDeConvolutionFp32, DeConvTest2) {
auto deconv_param = new ConvParameter();
float *correct;
int total_size = DeConvTestInit2(&inputs_, &outputs_, deconv_param, &correct);
lite::InnerContext *ctx = new lite::InnerContext;
auto *ctx = new lite::InnerContext;
ctx->thread_num_ = 1;
ASSERT_EQ(lite::RET_OK, ctx->Init());
kernel::DeConvolutionCPUKernel *deconv =
auto *deconv =
new kernel::DeConvolutionCPUKernel(reinterpret_cast<OpParameter *>(deconv_param), inputs_, outputs_, ctx, nullptr);

deconv->Init();
deconv->Run();
CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001);
ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001));

delete deconv;
for (auto t : inputs_) delete t;

@ -625,15 +624,15 @@ TEST_F(TestDeConvolutionFp32, DeConvTest3) {
auto deconv_param = new ConvParameter();
float *correct;
int total_size = DeConvTestInit3(&inputs_, &outputs_, deconv_param, &correct);
lite::InnerContext *ctx = new lite::InnerContext;
auto *ctx = new lite::InnerContext;
ctx->thread_num_ = 2;
ASSERT_EQ(lite::RET_OK, ctx->Init());
kernel::DeConvolutionCPUKernel *deconv =
auto *deconv =
new kernel::DeConvolutionCPUKernel(reinterpret_cast<OpParameter *>(deconv_param), inputs_, outputs_, ctx, nullptr);

deconv->Init();
deconv->Run();
CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001);
ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001));

delete deconv;
for (auto t : inputs_) delete t;

@ -694,15 +693,15 @@ TEST_F(TestDeConvolutionFp32, DeConvTest4) {
auto deconv_param = new ConvParameter();
float *correct;
int total_size = DeConvTestInit4(&inputs_, &outputs_, deconv_param, &correct);
lite::InnerContext *ctx = new lite::InnerContext;
auto *ctx = new lite::InnerContext;
ctx->thread_num_ = 2;
ASSERT_EQ(lite::RET_OK, ctx->Init());
kernel::DeConvolutionCPUKernel *deconv =
auto *deconv =
new kernel::DeConvolutionCPUKernel(reinterpret_cast<OpParameter *>(deconv_param), inputs_, outputs_, ctx, nullptr);

deconv->Init();
deconv->Run();
CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001);
ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001));

/* running warm up */
for (int i = 0; i < 0; i++) {
@ -51,7 +51,7 @@ TEST_F(DepthToSpaceTestFp32, DepthToSpaceTest2) {
std::cout << output[i] << " ";
}
std::cout << "\n";
CompareOutputData(output, expect_out, kOutSize, 0.000001);
ASSERT_EQ(0, CompareOutputData(output, expect_out, kOutSize, 0.000001));
}

TEST_F(DepthToSpaceTestFp32, DepthToSpaceTest3) {
@ -80,6 +80,6 @@ TEST_F(DepthToSpaceTestFp32, DepthToSpaceTest3) {
std::cout << output[i] << " ";
}
std::cout << "\n";
CompareOutputData(output, expect_out, kOutSize, 0.000001);
ASSERT_EQ(0, CompareOutputData(output, expect_out, kOutSize, 0.000001));
}
} // namespace mindspore
@ -129,33 +129,33 @@ TEST_F(TestDetectionPostProcessFp32, Fast) {
op->Init();
op->Run();

float *output_boxes = reinterpret_cast<float *>(outputs_[0]->MutableData());
auto *output_boxes = reinterpret_cast<float *>(outputs_[0]->MutableData());
size_t output_boxes_size;
std::string output_boxes_path = "./test_data/detectionPostProcess/output_0.bin";
auto correct_boxes =
reinterpret_cast<float *>(mindspore::lite::ReadFile(output_boxes_path.c_str(), &output_boxes_size));
CompareOutputData(output_boxes, correct_boxes, outputs_[0]->ElementsNum(), 0.0001);
ASSERT_EQ(0, CompareOutputData(output_boxes, correct_boxes, outputs_[0]->ElementsNum(), 0.0001));

float *output_classes = reinterpret_cast<float *>(outputs_[1]->MutableData());
auto *output_classes = reinterpret_cast<float *>(outputs_[1]->MutableData());
size_t output_classes_size;
std::string output_classes_path = "./test_data/detectionPostProcess/output_1.bin";
auto correct_classes =
reinterpret_cast<float *>(mindspore::lite::ReadFile(output_classes_path.c_str(), &output_classes_size));
CompareOutputData(output_classes, correct_classes, outputs_[1]->ElementsNum(), 0.0001);
ASSERT_EQ(0, CompareOutputData(output_classes, correct_classes, outputs_[1]->ElementsNum(), 0.0001));

float *output_scores = reinterpret_cast<float *>(outputs_[2]->MutableData());
auto *output_scores = reinterpret_cast<float *>(outputs_[2]->MutableData());
size_t output_scores_size;
std::string output_scores_path = "./test_data/detectionPostProcess/output_2.bin";
auto correct_scores =
reinterpret_cast<float *>(mindspore::lite::ReadFile(output_scores_path.c_str(), &output_scores_size));
CompareOutputData(output_scores, correct_scores, outputs_[2]->ElementsNum(), 0.0001);
ASSERT_EQ(0, CompareOutputData(output_scores, correct_scores, outputs_[2]->ElementsNum(), 0.0001));

float *output_num_det = reinterpret_cast<float *>(outputs_[3]->MutableData());
auto *output_num_det = reinterpret_cast<float *>(outputs_[3]->MutableData());
size_t output_num_det_size;
std::string output_num_det_path = "./test_data/detectionPostProcess/output_3.bin";
auto correct_num_det =
reinterpret_cast<float *>(mindspore::lite::ReadFile(output_num_det_path.c_str(), &output_num_det_size));
CompareOutputData(output_num_det, correct_num_det, outputs_[3]->ElementsNum(), 0.0001);
ASSERT_EQ(0, CompareOutputData(output_num_det, correct_num_det, outputs_[3]->ElementsNum(), 0.0001));

delete op;
for (auto t : inputs_) delete t;
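The four ground-truth comparisons in the hunk above all follow the same shape: cast the tensor's data, read a ./test_data/detectionPostProcess/output_N.bin blob with mindspore::lite::ReadFile, and assert on CompareOutputData. Purely as an illustration, the block could be driven from a loop like the fragment below; this is a hypothetical sketch meant to sit inside the same test body, the std::to_string-built path is an assumption about the naming convention, and it keeps the original code's behaviour of not freeing the buffer returned by ReadFile.

// Hypothetical sketch only, not part of the commit.
for (int i = 0; i < 4; ++i) {
  auto *output = reinterpret_cast<float *>(outputs_[i]->MutableData());
  size_t correct_size = 0;
  std::string path = "./test_data/detectionPostProcess/output_" + std::to_string(i) + ".bin";
  auto *correct = reinterpret_cast<float *>(mindspore::lite::ReadFile(path.c_str(), &correct_size));
  // Element count is taken from the output tensor, as the test above does.
  ASSERT_EQ(0, CompareOutputData(output, correct, outputs_[i]->ElementsNum(), 0.0001));
}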
@ -33,14 +33,14 @@ class TestFcFp32 : public mindspore::CommonTest {

int FcTestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_,
MatMulParameter *matmal_param, float **correct) {
Tensor *in_t = new Tensor(kNumberTypeFloat, {2, 2, 2, 2}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto *in_t = new Tensor(kNumberTypeFloat, {2, 2, 2, 2}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
in_t->MallocData();
float in[] = {-3.2366564, -4.7733846, -7.8329225, 16.146885, 5.060793, -6.1471, -1.7680453, -6.5721383,
17.87506, -5.1192183, 10.742863, 1.4536934, 19.693445, 19.45783, 5.063163, 0.5234792};
memcpy(in_t->MutableData(), in, sizeof(float) * in_t->ElementsNum());
inputs_->push_back(in_t);

Tensor *weight_t = new Tensor(kNumberTypeFloat, {3, 8}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto *weight_t = new Tensor(kNumberTypeFloat, {3, 8}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
weight_t->MallocData();
float weight[] = {-0.0024438887, 0.0006738146, -0.008169129, 0.0021510671, -0.012470592, -0.0053063435,
0.006050155, 0.008656233, 0.012911413, -0.0028635843, -0.00034080597, -0.0010622552,

@ -49,13 +49,13 @@ int FcTestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *
memcpy(weight_t->MutableData(), weight, sizeof(float) * weight_t->ElementsNum());
inputs_->push_back(weight_t);

Tensor *bias_t = new Tensor(kNumberTypeFloat, {3}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto *bias_t = new Tensor(kNumberTypeFloat, {3}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
bias_t->MallocData();
float bias[] = {1.6103756, -0.9872417, 0.546849};
memcpy(bias_t->MutableData(), bias, sizeof(float) * bias_t->ElementsNum());
inputs_->push_back(bias_t);

Tensor *out_t = new Tensor(kNumberTypeFloat, {2, 3}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto *out_t = new Tensor(kNumberTypeFloat, {2, 3}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
out_t->MallocData();
outputs_->push_back(out_t);

@ -76,44 +76,43 @@ TEST_F(TestFcFp32, FcTest1) {
auto matmul_param = new MatMulParameter();
float *correct;
int total_size = FcTestInit1(&inputs_, &outputs_, matmul_param, &correct);
lite::InnerContext *ctx = new lite::InnerContext;
auto *ctx = new lite::InnerContext;
ctx->thread_num_ = 2;
ASSERT_EQ(lite::RET_OK, ctx->Init());
kernel::FullconnectionCPUKernel *fc =
auto *fc =
new kernel::FullconnectionCPUKernel(reinterpret_cast<OpParameter *>(matmul_param), inputs_, outputs_, ctx, nullptr);

fc->Init();
fc->Run();
CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001);
ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001));
}

int FcTestInit2(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_,
MatMulParameter *matmal_param, float **correct) {
size_t buffer_size;

Tensor *in_t =
new Tensor(kNumberTypeFloat, {20, 4, 2, 10}, schema::Format_NCHW, lite::Tensor::Category::CONST_TENSOR);
auto *in_t = new Tensor(kNumberTypeFloat, {20, 4, 2, 10}, schema::Format_NCHW, lite::Tensor::Category::CONST_TENSOR);
in_t->MallocData();
std::string in_path = "./matmul/FcFp32_input1.bin";
auto in_data = mindspore::lite::ReadFile(in_path.c_str(), &buffer_size);
memcpy(in_t->MutableData(), in_data, buffer_size);
inputs_->push_back(in_t);

Tensor *weight_t = new Tensor(kNumberTypeFloat, {30, 80}, schema::Format_NCHW, lite::Tensor::Category::CONST_TENSOR);
auto *weight_t = new Tensor(kNumberTypeFloat, {30, 80}, schema::Format_NCHW, lite::Tensor::Category::CONST_TENSOR);
weight_t->MallocData();
std::string weight_path = "./matmul/FcFp32_weight1.bin";
auto w_data = mindspore::lite::ReadFile(weight_path.c_str(), &buffer_size);
memcpy(weight_t->MutableData(), w_data, buffer_size);
inputs_->push_back(weight_t);

Tensor *bias_t = new Tensor(kNumberTypeFloat, {30}, schema::Format_NCHW, lite::Tensor::Category::CONST_TENSOR);
auto *bias_t = new Tensor(kNumberTypeFloat, {30}, schema::Format_NCHW, lite::Tensor::Category::CONST_TENSOR);
bias_t->MallocData();
std::string bias_path = "./matmul/FcFp32_bias1.bin";
auto bias_data = mindspore::lite::ReadFile(bias_path.c_str(), &buffer_size);
memcpy(bias_t->MutableData(), bias_data, buffer_size);
inputs_->push_back(bias_t);

Tensor *out_t = new Tensor(kNumberTypeFloat, {20, 30}, schema::Format_NCHW, lite::Tensor::Category::CONST_TENSOR);
auto *out_t = new Tensor(kNumberTypeFloat, {20, 30}, schema::Format_NCHW, lite::Tensor::Category::CONST_TENSOR);
out_t->MallocData();
outputs_->push_back(out_t);

@ -135,26 +134,26 @@ TEST_F(TestFcFp32, FcTest2) {
auto matmul_param = new MatMulParameter();
float *correct;
int total_size = FcTestInit2(&inputs_, &outputs_, matmul_param, &correct);
lite::InnerContext *ctx = new lite::InnerContext;
auto *ctx = new lite::InnerContext;
ctx->thread_num_ = 1;
ASSERT_EQ(lite::RET_OK, ctx->Init());
kernel::FullconnectionCPUKernel *fc =
auto *fc =
new kernel::FullconnectionCPUKernel(reinterpret_cast<OpParameter *>(matmul_param), inputs_, outputs_, ctx, nullptr);

fc->Init();
fc->Run();
CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001);
ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001));
}

void FcTestInit3(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_,
MatMulParameter *matmal_param, float **correct) {
Tensor *in_t = new Tensor(kNumberTypeFloat, {1, 1, 1, 20}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto *in_t = new Tensor(kNumberTypeFloat, {1, 1, 1, 20}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
in_t->MallocData();
float in[] = {1, 0, 3, 0, 4, 5, 2, 5, 2, 5, 1, 5, 0, 1, 2, 0, 2, 1, 0, 5};
memcpy(in_t->MutableData(), in, sizeof(float) * in_t->ElementsNum());
inputs_->push_back(in_t);

Tensor *weight_t = new Tensor(kNumberTypeFloat, {16, 20}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto *weight_t = new Tensor(kNumberTypeFloat, {16, 20}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
weight_t->MallocData();
float weight[] = {0, 5, 5, 3, 0, 5, 3, 1, 0, 1, 3, 0, 5, 5, 2, 4, 0, 1, 1, 2, 3, 0, 5, 5, 4, 4, 1, 4, 1, 1, 5, 3,
3, 1, 0, 3, 1, 2, 4, 5, 3, 4, 4, 0, 3, 5, 0, 3, 4, 1, 0, 1, 3, 4, 0, 5, 2, 5, 0, 4, 2, 2, 2, 2,

@ -169,7 +168,7 @@ void FcTestInit3(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor
memcpy(weight_t->MutableData(), weight, sizeof(float) * weight_t->ElementsNum());
inputs_->push_back(weight_t);

Tensor *out_t = new Tensor(kNumberTypeFloat, {1, 16}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
auto *out_t = new Tensor(kNumberTypeFloat, {1, 16}, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
out_t->MallocData();
outputs_->push_back(out_t);

@ -185,17 +184,17 @@ TEST_F(TestFcFp32, FcTest3) {
auto matmul_param = new MatMulParameter();
float *correct;
FcTestInit3(&inputs_, &outputs_, matmul_param, &correct);
lite::InnerContext *ctx = new lite::InnerContext;
auto *ctx = new lite::InnerContext;
ctx->thread_num_ = 1;
ASSERT_EQ(lite::RET_OK, ctx->Init());
kernel::FullconnectionCPUKernel *fc =
auto *fc =
new kernel::FullconnectionCPUKernel(reinterpret_cast<OpParameter *>(matmul_param), inputs_, outputs_, ctx, nullptr);

fc->Init();
struct timeval start, end;
gettimeofday(&start, NULL);
gettimeofday(&start, nullptr);
for (int i = 0; i < 100000; ++i) fc->Run();
gettimeofday(&end, NULL);
gettimeofday(&end, nullptr);
// printf("## elapsed: %llu\n", 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec);
}
@ -69,7 +69,7 @@ TEST_F(TestInstanceNormFp32, INTest1) {
std::cout << output[i] << " ,";
}
std::cout << std::endl;
CompareOutputData(output.data(), corr_out.data(), output0_tensor.ElementsNum(), 0.001);
ASSERT_EQ(0, CompareOutputData(output.data(), corr_out.data(), output0_tensor.ElementsNum(), 0.001));

input0_tensor.set_data(nullptr);
input1_tensor.set_data(nullptr);

@ -124,7 +124,7 @@ TEST_F(TestInstanceNormFp32, INTest2) {
std::cout << output[i] << " ,";
}
std::cout << std::endl;
CompareOutputData(output.data(), corr_out.data(), output0_tensor.ElementsNum(), 0.001);
ASSERT_EQ(0, CompareOutputData(output.data(), corr_out.data(), output0_tensor.ElementsNum(), 0.001));

input0_tensor.set_data(nullptr);
input1_tensor.set_data(nullptr);
@ -92,7 +92,7 @@ TEST_F(TestL2NormFp32, Test1) {
auto ret = kernel_->Run();
EXPECT_EQ(0, ret);

CompareOutputData(output_data, expect.data(), output_size, err_tol_);
ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol_));
}

// 2thread all axis relu

@ -113,7 +113,7 @@ TEST_F(TestL2NormFp32, Test2) {
auto ret = kernel_->Run();
EXPECT_EQ(0, ret);

CompareOutputData(output_data, expect.data(), output_size, err_tol_);
ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol_));
}

// 4 thread trailing axis no activation

@ -134,7 +134,7 @@ TEST_F(TestL2NormFp32, Test3) {
auto ret = kernel_->Run();
EXPECT_EQ(0, ret);

CompareOutputData(output_data, expect.data(), output_size, err_tol_);
ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol_));
}

// 1 thread trailing axis no activation

@ -155,7 +155,7 @@ TEST_F(TestL2NormFp32, Test4) {
auto ret = kernel_->Run();
EXPECT_EQ(0, ret);

CompareOutputData(output_data, expect.data(), output_size, err_tol_);
ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol_));
}

} // namespace mindspore
@ -71,7 +71,7 @@ TEST_F(TestLshProjectionFp32, Dense1DInputs) {

std::vector<int32_t> except_result = {0, 0, 0, 1, 0, 0};
PrintData("output data", output_data, 6);
CompareOutputData(output_data, except_result.data(), 6, 0.000001);
ASSERT_EQ(0, CompareOutputData(output_data, except_result.data(), 6, 0.000001));

in_tensor0.set_data(nullptr);
in_tensor1.set_data(nullptr);

@ -111,7 +111,7 @@ TEST_F(TestLshProjectionFp32, Sparse1DInputs) {

std::vector<int32_t> except_result = {0, 5, 8};
PrintData("output data", output_data, 3);
CompareOutputData(output_data, except_result.data(), 3, 0.000001);
ASSERT_EQ(0, CompareOutputData(output_data, except_result.data(), 3, 0.000001));

in_tensor0.set_data(nullptr);
in_tensor1.set_data(nullptr);

@ -155,7 +155,7 @@ TEST_F(TestLshProjectionFp32, Sparse3DInputs) {

std::vector<int32_t> except_result = {2, 5, 9};
PrintData("output data", output_data, 3);
CompareOutputData(output_data, except_result.data(), 3, 0.000001);
ASSERT_EQ(0, CompareOutputData(output_data, except_result.data(), 3, 0.000001));

in_tensor0.set_data(nullptr);
in_tensor1.set_data(nullptr);
@ -23,7 +23,7 @@
namespace mindspore {
class LstmFp32 : public mindspore::CommonTest {
public:
LstmFp32() {}
LstmFp32() = default;
};

void InitLstmParam(LstmParameter *lstm_param) {

@ -124,7 +124,7 @@ void InitLstmForwardCreator(std::vector<lite::Tensor *> *inputs, std::vector<lit
outputs->push_back(hidden_state);
}

void CompareOutput(lite::Tensor *output, std::vector<float> data) {
void CompareResult(lite::Tensor *output, std::vector<float> data) {
for (int i = 0; i < output->ElementsNum(); i++) {
std::cout << reinterpret_cast<float *>(output->MutableData())[i] << ", ";
}

@ -162,20 +162,20 @@ TEST_F(LstmFp32, LstmForwardFp32Accuracy) {
std::cout << "==================output data=================" << std::endl;
std::vector<float> output0_data = {-0.0702, 0.1225, 0.0876, -0.0357, -0.0227, -0.2294,
-0.0345, -0.0108, -0.2002, 0.0451, 0.0853, -0.1205};
CompareOutput(outputs[0], output0_data);
CompareResult(outputs[0], output0_data);

std::vector<float> output1_data = {0.0451, 0.0853, -0.1205};
CompareOutput(outputs[1], output1_data);
CompareResult(outputs[1], output1_data);

std::vector<float> output2_data = {0.0989, 0.2094, -0.4132};
CompareOutput(outputs[2], output2_data);
CompareResult(outputs[2], output2_data);

delete lstm_param;
for (unsigned int i = 0; i < inputs.size() - 1; i++) {
delete inputs[i];
}
for (unsigned int i = 0; i < outputs.size(); i++) {
delete outputs[i];
for (auto &output : outputs) {
delete output;
}
delete kernel;
MS_LOG(INFO) << "LstmFp32 forward accuracy passed";

@ -312,20 +312,20 @@ TEST_F(LstmFp32, LstmBackwardFp32Accuracy) {
std::vector<float> output0_data = {-0.2922, -0.1416, 0.0077, -0.0422, -0.0585, 0.2061, -0.2385, -0.0146,
-0.1796, -0.0554, -0.0973, 0.1013, -0.3062, -0.1516, -0.0310, 0.0459,
-0.0784, 0.0949, 0.0249, -0.0653, -0.0869, -0.1113, -0.2155, -0.0500};
CompareOutput(outputs[0], output0_data);
CompareResult(outputs[0], output0_data);

std::vector<float> output1_data = {0.0249, -0.0653, -0.0869, -0.0422, -0.0585, 0.2061};
CompareOutput(outputs[1], output1_data);
CompareResult(outputs[1], output1_data);

std::vector<float> output2_data = {0.0373, -0.2322, -0.1477, -0.1621, -0.1808, 0.5146};
CompareOutput(outputs[2], output2_data);
CompareResult(outputs[2], output2_data);

delete lstm_param;
for (unsigned int i = 0; i < inputs.size() - 1; i++) {
delete inputs[i];
}
for (unsigned int i = 0; i < outputs.size(); i++) {
delete outputs[i];
for (auto &output : outputs) {
delete output;
}
delete kernel;
MS_LOG(INFO) << "LstmFp32 backward accuracy passed";
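In the LSTM tests the local per-tensor checker is renamed from CompareOutput to CompareResult, plausibly so that it no longer shadows the shared CompareOutput utility being consolidated under the test helpers by this commit. Only the printing loop of the function appears in the diff above, so the following is a hypothetical completion: the per-element EXPECT_NEAR check and the 0.0001 tolerance are assumptions, not the function's actual body.

// Hypothetical sketch of the renamed checker; only the printing loop is shown in the diff.
void CompareResult(lite::Tensor *output, std::vector<float> data) {
  auto *out_data = reinterpret_cast<float *>(output->MutableData());
  for (int i = 0; i < output->ElementsNum(); i++) {
    std::cout << out_data[i] << ", ";
  }
  std::cout << std::endl;
  ASSERT_EQ(output->ElementsNum(), static_cast<int>(data.size()));
  for (int i = 0; i < output->ElementsNum(); i++) {
    EXPECT_NEAR(data[i], out_data[i], 0.0001);  // tolerance is an assumption
  }
}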
@ -45,7 +45,7 @@ TEST_F(TestMatMulFp32, Row2Col8Test1) {
0.75, 0.24, 0, 0, 0, 0, 0, 0, 0.66, 0.52, 0, 0, 0, 0, 0, 0};
float out[144] = {0};
RowMajor2Col8Major(in, out, 10, 9);
CompareOutputData(out, co, 144, 0.0001);
ASSERT_EQ(0, CompareOutputData(out, co, 144, 0.0001));
}

TEST_F(TestMatMulFp32, Row2Col8Test2) {
@ -65,11 +65,11 @@ TEST_F(TestMatMulFp32, Row2Col8Test2) {
0.24, 0, 0, 0, 0, 0, 0, 0.92, 0.52, 0, 0, 0, 0, 0, 0};
float out[120] = {0};
RowMajor2Col8Major(in, out, 18, 5);
CompareOutputData(out, co, 120, 0.0001);
ASSERT_EQ(0, CompareOutputData(out, co, 120, 0.0001));
}

int MMTestInit(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_, float *a_ptr, float *b_ptr,
std::vector<int> a_shape, std::vector<int> b_shape, std::vector<int> c_shape) {
const std::vector<int> &a_shape, const std::vector<int> &b_shape, const std::vector<int> &c_shape) {
auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
in_t->MallocData();
memcpy(in_t->MutableData(), a_ptr, sizeof(float) * in_t->ElementsNum());

@ -89,8 +89,8 @@ int MMTestInit(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *>
}

int MMTestInit2(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_, float *a_ptr, float *b_ptr,
float *bias_ptr, std::vector<int> a_shape, std::vector<int> b_shape, std::vector<int> bias_shape,
std::vector<int> c_shape) {
float *bias_ptr, const std::vector<int> &a_shape, const std::vector<int> &b_shape,
const std::vector<int> &bias_shape, const std::vector<int> &c_shape) {
auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
in_t->MallocData();
memcpy(in_t->MutableData(), a_ptr, sizeof(float) * in_t->ElementsNum());

@ -140,7 +140,7 @@ TEST_F(TestMatMulFp32, simple) {
mm->Run();
float correct[] = {-0.1256939023733139, -0.07744802534580231, 0.07410638779401779,
-0.3049793541431427, -0.027687929570674896, -0.18109679222106934};
CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001);
ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001));
delete mm;
for (auto t : inputs_) delete t;
for (auto t : outputs_) delete t;

@ -173,7 +173,7 @@ TEST_F(TestMatMulFp32, simple_bias) {
mm->Run();
float correct[] = {-0.1256939023733139 + 1, -0.07744802534580231 + 2, 0.07410638779401779 + 3,
-0.3049793541431427 + 1, -0.027687929570674896 + 2, -0.18109679222106934 + 3};
CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001);
ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001));
delete mm;
for (auto t : inputs_) delete t;
for (auto t : outputs_) delete t;

@ -264,7 +264,7 @@ TEST_F(TestMatMulFp32, simple2) {
346, 486, 451, 451, 490, 475, 339, 319, 409, 315, 324, 367, 493, 286, 348, 185, 240, 287, 214, 312, 265, 237, 218,
261, 316, 279, 186, 377, 319, 279, 304, 281, 207, 261, 209, 287, 270, 415, 378, 312, 388, 423, 273, 230, 294, 239,
243, 319, 346};
CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001);
ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001));
delete mm;
for (auto t : inputs_) delete t;
for (auto t : outputs_) delete t;

@ -294,7 +294,7 @@ TEST_F(TestMatMulFp32, simple_transb) {
mm->Init();
mm->Run();
float correct[] = {0.00533547, 0.002545945, 0.062974121, -0.445441471, -0.246223617, -0.142070031};
CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001);
ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001));
delete mm;
for (auto t : inputs_) delete t;
for (auto t : outputs_) delete t;

@ -348,7 +348,7 @@ TEST_F(TestMatMulFp32, batch) {
-17.63555145263672, -8.490625381469727, 5.317771911621094, -14.561882019042969,
-7.251564025878906, -2.508212089538574, 5.86458683013916, -3.466249465942383,
8.869029998779297, 25.034008026123047};
CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001);
ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001));
delete mm;
for (auto t : inputs_) delete t;
for (auto t : outputs_) delete t;
@ -110,7 +110,8 @@ TEST_F(TestNMSFp32, TestCase1) {
EXPECT_EQ(0, ret);

std::vector<int32_t> expect{0, 0, 3, 0, 0, 0, 0, 0, 5};
CompareOutputData(reinterpret_cast<int32_t *>(out_tensor_.data_c()), expect.data(), output_size, err_tol_);
ASSERT_EQ(0,
CompareOutputData(reinterpret_cast<int32_t *>(out_tensor_.data_c()), expect.data(), output_size, err_tol_));
}

} // namespace mindspore
@ -131,7 +131,7 @@ TEST_F(TestPadFp32, TestPad1) {
20.0, 21.0, 22.0, 23.0, 21.0, 22.0, 23.0, 18.0, 19.0, 20.0, 15.0, 16.0, 17.0, 12.0, 13.0, 14.0, 9.0, 10.0, 11.0,
6.0, 7.0, 8.0, 3.0, 4.0, 5.0, 0.0, 1.0, 2.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0,
10.0, 11.0, 9.0, 10.0, 11.0, 6.0, 7.0, 8.0, 3.0, 4.0, 5.0, 0.0, 1.0, 2.0};
CompareOutputData(out_data, expect.data(), 432, err_tol);
ASSERT_EQ(0, CompareOutputData(out_data, expect.data(), 432, err_tol));
}

TEST_F(TestPadFp32, TestPad2) {
@ -166,7 +166,7 @@ TEST_F(TestPadFp32, TestPad2) {
16.0, 17.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 18.0, 19.0, 20.0, 15.0, 16.0,
17.0, 12.0, 13.0, 14.0, 9.0, 10.0, 11.0, 6.0, 7.0, 8.0, 3.0, 4.0, 5.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0,
6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 6.0, 7.0, 8.0, 3.0, 4.0, 5.0, 0.0, 1.0, 2.0};
CompareOutputData(out_data, expect.data(), 300, err_tol);
ASSERT_EQ(0, CompareOutputData(out_data, expect.data(), 300, err_tol));
}

TEST_F(TestPadFp32, TestPad3) {
@ -202,7 +202,7 @@ TEST_F(TestPadFp32, TestPad3) {
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
CompareOutputData(out_data, expect.data(), 300, err_tol);
ASSERT_EQ(0, CompareOutputData(out_data, expect.data(), 300, err_tol));
}

TEST_F(TestPadFp32, TestPad4) {
@ -238,7 +238,7 @@ TEST_F(TestPadFp32, TestPad4) {
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0};
CompareOutputData(out_data, expect.data(), 300, err_tol);
ASSERT_EQ(0, CompareOutputData(out_data, expect.data(), 300, err_tol));
}

TEST_F(TestPadFp32, TestPad5) {
@ -274,6 +274,6 @@ TEST_F(TestPadFp32, TestPad5) {
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0};
CompareOutputData(out_data, expect.data(), 300, err_tol);
ASSERT_EQ(0, CompareOutputData(out_data, expect.data(), 300, err_tol));
}
} // namespace mindspore
@ -26,7 +26,8 @@ class TestPowerFp32 : public mindspore::CommonTest {
};

int PowerTestInit(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_, float *a_ptr,
float *b_ptr, std::vector<int> a_shape, std::vector<int> b_shape, std::vector<int> c_shape) {
float *b_ptr, const std::vector<int> &a_shape, const std::vector<int> &b_shape,
const std::vector<int> &c_shape) {
auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
in_t->MallocData();
memcpy(in_t->MutableData(), a_ptr, sizeof(float) * in_t->ElementsNum());

@ -46,7 +47,7 @@ int PowerTestInit(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor
}

int PowerTestInit2(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_, float *a_ptr,
std::vector<int> a_shape, std::vector<int> c_shape) {
const std::vector<int> &a_shape, const std::vector<int> &c_shape) {
auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
in_t->MallocData();
memcpy(in_t->MutableData(), a_ptr, sizeof(float) * in_t->ElementsNum());

@ -74,12 +75,11 @@ TEST_F(TestPowerFp32, Simple) {
auto ctx = new lite::InnerContext;
ctx->thread_num_ = 1;
ASSERT_EQ(lite::RET_OK, ctx->Init());
kernel::PowerCPUKernel *op =
new kernel::PowerCPUKernel(reinterpret_cast<OpParameter *>(param), inputs_, outputs_, ctx, nullptr);
auto *op = new kernel::PowerCPUKernel(reinterpret_cast<OpParameter *>(param), inputs_, outputs_, ctx, nullptr);
op->Init();
op->Run();
float correct[] = {1, 64, 2187, 65536};
CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001);
ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001));
delete op;
for (auto t : inputs_) delete t;
for (auto t : outputs_) delete t;

@ -99,12 +99,11 @@ TEST_F(TestPowerFp32, Broadcast) {
auto ctx = new lite::InnerContext;
ctx->thread_num_ = 2;
ASSERT_EQ(lite::RET_OK, ctx->Init());
kernel::PowerCPUKernel *op =
new kernel::PowerCPUKernel(reinterpret_cast<OpParameter *>(param), inputs_, outputs_, ctx, nullptr);
auto *op = new kernel::PowerCPUKernel(reinterpret_cast<OpParameter *>(param), inputs_, outputs_, ctx, nullptr);
op->Init();
op->Run();
float correct[] = {1, 4, 9, 16};
CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001);
ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001));
delete op;
for (auto t : inputs_) delete t;
for (auto t : outputs_) delete t;
@ -116,7 +116,7 @@ TEST_F(TestReduceFp32, Mean1) {
kernel_->Run();

int output_size = 24;
CompareOutputData(out, correct, output_size, err_tol);
ASSERT_EQ(0, CompareOutputData(out, correct, output_size, err_tol));
}

// thread num 2 reduce_to_end

@ -143,7 +143,7 @@ TEST_F(TestReduceFp32, Mean2) {
kernel_->Run();

int output_size = 2;
CompareOutputData(out, correct, output_size, err_tol);
ASSERT_EQ(0, CompareOutputData(out, correct, output_size, err_tol));
}

// thread num 1

@ -171,7 +171,7 @@ TEST_F(TestReduceFp32, Mean3) {
kernel_->Run();

int output_size = 2;
CompareOutputData(out, correct, output_size, err_tol);
ASSERT_EQ(0, CompareOutputData(out, correct, output_size, err_tol));
}

TEST_F(TestReduceFp32, MeanAllAxis) {
@ -197,7 +197,7 @@ TEST_F(TestReduceFp32, MeanAllAxis) {
kernel_->Run();

int output_size = 1;
CompareOutputData(out, correct, output_size, err_tol);
ASSERT_EQ(0, CompareOutputData(out, correct, output_size, err_tol));
}

TEST_F(TestReduceFp32, Sum) {
@ -224,7 +224,7 @@ TEST_F(TestReduceFp32, Sum) {
kernel_->Run();

int output_size = 24;
CompareOutputData(out, correct, output_size, err_tol);
ASSERT_EQ(0, CompareOutputData(out, correct, output_size, err_tol));
}

// sum reduce_to_end

@ -253,7 +253,7 @@ TEST_F(TestReduceFp32, Sum2) {
kernel_->Run();

int output_size = 32;
CompareOutputData(out, correct, output_size, err_tol);
ASSERT_EQ(0, CompareOutputData(out, correct, output_size, err_tol));
}

TEST_F(TestReduceFp32, Sum3) {
@ -281,7 +281,7 @@ TEST_F(TestReduceFp32, Sum3) {
kernel_->Run();

int output_size = 32;
CompareOutputData(out, correct, output_size, err_tol);
ASSERT_EQ(0, CompareOutputData(out, correct, output_size, err_tol));
}

TEST_F(TestReduceFp32, SumAllAxis) {
@ -306,7 +306,7 @@ TEST_F(TestReduceFp32, SumAllAxis) {
kernel_->Run();

int output_size = 1;
CompareOutputData(out, correct, output_size, err_tol);
ASSERT_EQ(0, CompareOutputData(out, correct, output_size, err_tol));
}

TEST_F(TestReduceFp32, Max) {
@ -333,7 +333,7 @@ TEST_F(TestReduceFp32, Max) {
kernel_->Run();

int output_size = 24;
CompareOutputData(out, correct, output_size, err_tol);
ASSERT_EQ(0, CompareOutputData(out, correct, output_size, err_tol));
}

TEST_F(TestReduceFp32, Min) {
@ -360,7 +360,7 @@ TEST_F(TestReduceFp32, Min) {
kernel_->Run();

int output_size = 24;
CompareOutputData(out, correct, output_size, err_tol);
ASSERT_EQ(0, CompareOutputData(out, correct, output_size, err_tol));
}

TEST_F(TestReduceFp32, Prod) {
@ -388,7 +388,7 @@ TEST_F(TestReduceFp32, Prod) {
kernel_->Run();

int output_size = 24;
CompareOutputData(out, correct, output_size, err_tol);
ASSERT_EQ(0, CompareOutputData(out, correct, output_size, err_tol));
}

TEST_F(TestReduceFp32, SumSquare) {
@ -414,7 +414,7 @@ TEST_F(TestReduceFp32, SumSquare) {
kernel_->Run();

int output_size = 8;
CompareOutputData(out, correct, output_size, err_tol);
ASSERT_EQ(0, CompareOutputData(out, correct, output_size, err_tol));
}

TEST_F(TestReduceFp32, SumSquare2) {
@ -443,7 +443,7 @@ TEST_F(TestReduceFp32, SumSquare2) {
kernel_->Run();

int output_size = 32;
CompareOutputData(out, correct, output_size, err_tol);
ASSERT_EQ(0, CompareOutputData(out, correct, output_size, err_tol));
}

TEST_F(TestReduceFp32, ASum) {
@ -471,6 +471,6 @@ TEST_F(TestReduceFp32, ASum) {
kernel_->Run();

int output_size = 32;
CompareOutputData(out, correct, output_size, err_tol);
ASSERT_EQ(0, CompareOutputData(out, correct, output_size, err_tol));
}
} // namespace mindspore
@ -87,7 +87,7 @@ TEST_F(TestResizeBilinearFp32, ResizeBilinearTest1) {
auto ret = kernel_->Run();
EXPECT_EQ(0, ret);

CompareOutputData(output_data, expect.data(), output_size, err_tol);
ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
}

// 2*2 -> 1*1

@ -104,7 +104,7 @@ TEST_F(TestResizeBilinearFp32, ResizeBilinearTest2) {
auto ret = kernel_->Run();
EXPECT_EQ(0, ret);

CompareOutputData(output_data, expect.data(), output_size, err_tol);
ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
}

// 2*2 -> 1*2

@ -121,7 +121,7 @@ TEST_F(TestResizeBilinearFp32, ResizeBilinearTest3) {
auto ret = kernel_->Run();
EXPECT_EQ(0, ret);

CompareOutputData(output_data, expect.data(), output_size, err_tol);
ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
}

// 2*2 -> 2*1

@ -138,7 +138,7 @@ TEST_F(TestResizeBilinearFp32, ResizeBilinearTest4) {
auto ret = kernel_->Run();
EXPECT_EQ(0, ret);

CompareOutputData(output_data, expect.data(), output_size, err_tol);
ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
}

// 2*2 -> 2*2

@ -155,7 +155,7 @@ TEST_F(TestResizeBilinearFp32, ResizeBilinearTest5) {
auto ret = kernel_->Run();
EXPECT_EQ(0, ret);

CompareOutputData(output_data, expect.data(), output_size, err_tol);
ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
}

// 2*2 -> 1*4

@ -172,7 +172,7 @@ TEST_F(TestResizeBilinearFp32, ResizeBilinearTest6) {
auto ret = kernel_->Run();
EXPECT_EQ(0, ret);

CompareOutputData(output_data, expect.data(), output_size, err_tol);
ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
}

// 2*2 -> 4*1

@ -189,7 +189,7 @@ TEST_F(TestResizeBilinearFp32, ResizeBilinearTest7) {
auto ret = kernel_->Run();
EXPECT_EQ(0, ret);

CompareOutputData(output_data, expect.data(), output_size, err_tol);
ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
}

// 2*2 -> 2*4

@ -206,7 +206,7 @@ TEST_F(TestResizeBilinearFp32, ResizeBilinearTest8) {
auto ret = kernel_->Run();
EXPECT_EQ(0, ret);

CompareOutputData(output_data, expect.data(), output_size, err_tol);
ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
}

// 2*2 -> 4*2

@ -223,7 +223,7 @@ TEST_F(TestResizeBilinearFp32, ResizeBilinearTest9) {
auto ret = kernel_->Run();
EXPECT_EQ(0, ret);

CompareOutputData(output_data, expect.data(), output_size, err_tol);
ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
}

// 2*2 -> 3*3

@ -240,7 +240,7 @@ TEST_F(TestResizeBilinearFp32, ResizeBilinearTest10) {
auto ret = kernel_->Run();
EXPECT_EQ(0, ret);

CompareOutputData(output_data, expect.data(), output_size, err_tol);
ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
}

// 2*2 -> 4*4

@ -257,7 +257,7 @@ TEST_F(TestResizeBilinearFp32, ResizeBilinearTest11) {
auto ret = kernel_->Run();
EXPECT_EQ(0, ret);

CompareOutputData(output_data, expect.data(), output_size, err_tol);
ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
}

// 2*2*2*5 -> 2*4*4*5

@ -285,7 +285,7 @@ TEST_F(TestResizeBilinearFp32, ResizeBilinearTest12) {
auto ret = kernel_->Run();
EXPECT_EQ(0, ret);

CompareOutputData(output_data, expect.data(), output_size, err_tol);
ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
}

// 2*2*2*5 -> 2*4*4*5 align corners

@ -320,7 +320,7 @@ TEST_F(TestResizeBilinearFp32, ResizeBilinearTest13) {
auto ret = kernel_->Run();
EXPECT_EQ(0, ret);

CompareOutputData(output_data, expect.data(), output_size, err_tol);
ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
}

// 2*2*2*5 -> 2*4*4*5 thread_num 2

@ -349,7 +349,7 @@ TEST_F(TestResizeBilinearFp32, ResizeBilinearTest14) {
auto ret = kernel_->Run();
EXPECT_EQ(0, ret);

CompareOutputData(output_data, expect.data(), output_size, err_tol);
ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
}

// 2*2*2*5 -> 2*4*4*5 thread_num 4

@ -379,7 +379,7 @@ TEST_F(TestResizeBilinearFp32, ResizeBilinearTest15) {
auto ret = kernel_->Run();
EXPECT_EQ(0, ret);

CompareOutputData(output_data, expect.data(), output_size, err_tol);
ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
}

// 5*5 -> 2*2

@ -405,6 +405,6 @@ TEST_F(TestResizeBilinearFp32, ResizeBilinearTest16) {
auto ret = kernel_->Run();
EXPECT_EQ(0, ret);

CompareOutputData(output_data, expect.data(), output_size, err_tol);
ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
}
} // namespace mindspore
@ -81,7 +81,7 @@ TEST_F(TestResizeNearestNeighborFp32, ResizeNearestNeighborTest1) {
|
|||
auto ret = kernel_->Run();
|
||||
EXPECT_EQ(0, ret);
|
||||
|
||||
CompareOutputData(output_data, expect.data(), output_size, err_tol);
|
||||
ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
|
}

// 2*2 -> 1*1
@@ -98,7 +98,7 @@ TEST_F(TestResizeNearestNeighborFp32, ResizeNearestNeighborTest2) {
auto ret = kernel_->Run();
EXPECT_EQ(0, ret);

- CompareOutputData(output_data, expect.data(), output_size, err_tol);
+ ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
}

// 2*2 -> 1*2
@@ -115,7 +115,7 @@ TEST_F(TestResizeNearestNeighborFp32, ResizeNearestNeighborTest3) {
auto ret = kernel_->Run();
EXPECT_EQ(0, ret);

- CompareOutputData(output_data, expect.data(), output_size, err_tol);
+ ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
}

// 2*2 -> 2*1
@@ -132,7 +132,7 @@ TEST_F(TestResizeNearestNeighborFp32, ResizeNearestNeighborTest4) {
auto ret = kernel_->Run();
EXPECT_EQ(0, ret);

- CompareOutputData(output_data, expect.data(), output_size, err_tol);
+ ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
}

// 2*2 -> 2*2
@@ -149,7 +149,7 @@ TEST_F(TestResizeNearestNeighborFp32, ResizeNearestNeighborTest5) {
auto ret = kernel_->Run();
EXPECT_EQ(0, ret);

- CompareOutputData(output_data, expect.data(), output_size, err_tol);
+ ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
}

// 2*2 -> 1*4
@@ -166,7 +166,7 @@ TEST_F(TestResizeNearestNeighborFp32, ResizeNearestNeighborTest6) {
auto ret = kernel_->Run();
EXPECT_EQ(0, ret);

- CompareOutputData(output_data, expect.data(), output_size, err_tol);
+ ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
}

// 2*2 -> 4*1
@@ -183,7 +183,7 @@ TEST_F(TestResizeNearestNeighborFp32, ResizeNearestNeighborTest7) {
auto ret = kernel_->Run();
EXPECT_EQ(0, ret);

- CompareOutputData(output_data, expect.data(), output_size, err_tol);
+ ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
}

// 2*2 -> 2*4
@@ -200,7 +200,7 @@ TEST_F(TestResizeNearestNeighborFp32, ResizeNearestNeighborTest8) {
auto ret = kernel_->Run();
EXPECT_EQ(0, ret);

- CompareOutputData(output_data, expect.data(), output_size, err_tol);
+ ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
}

// 2*2 -> 4*2
@@ -217,7 +217,7 @@ TEST_F(TestResizeNearestNeighborFp32, ResizeNearestNeighborTest9) {
auto ret = kernel_->Run();
EXPECT_EQ(0, ret);

- CompareOutputData(output_data, expect.data(), output_size, err_tol);
+ ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
}

// 2*2 -> 3*3
@@ -234,7 +234,7 @@ TEST_F(TestResizeNearestNeighborFp32, ResizeNearestNeighborTest10) {
auto ret = kernel_->Run();
EXPECT_EQ(0, ret);

- CompareOutputData(output_data, expect.data(), output_size, err_tol);
+ ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
}

// 2*2 -> 4*4
@@ -251,7 +251,7 @@ TEST_F(TestResizeNearestNeighborFp32, ResizeNearestNeighborTest11) {
auto ret = kernel_->Run();
EXPECT_EQ(0, ret);

- CompareOutputData(output_data, expect.data(), output_size, err_tol);
+ ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
}

// 2*2*2*5 -> 2*4*4*5
@@ -279,7 +279,7 @@ TEST_F(TestResizeNearestNeighborFp32, ResizeNearestNeighborTest12) {
auto ret = kernel_->Run();
EXPECT_EQ(0, ret);

- CompareOutputData(output_data, expect.data(), output_size, err_tol);
+ ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
}

// 2*2*2*5 -> 2*4*4*5 thread_num 2
@@ -307,7 +307,7 @@ TEST_F(TestResizeNearestNeighborFp32, ResizeNearestNeighborTest13) {
auto ret = kernel_->Run();
EXPECT_EQ(0, ret);

- CompareOutputData(output_data, expect.data(), output_size, err_tol);
+ ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
}

// 2*2*2*5 -> 2*4*4*5 thread_num 4
@@ -335,7 +335,7 @@ TEST_F(TestResizeNearestNeighborFp32, ResizeNearestNeighborTest14) {
auto ret = kernel_->Run();
EXPECT_EQ(0, ret);

- CompareOutputData(output_data, expect.data(), output_size, err_tol);
+ ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
}

// 4*4 -> 6*6 align_corners True
@@ -354,7 +354,7 @@ TEST_F(TestResizeNearestNeighborFp32, ResizeNearestNeighborTest15) {
auto ret = kernel_->Run();
EXPECT_EQ(0, ret);

- CompareOutputData(output_data, expect.data(), output_size, err_tol);
+ ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
}

// 2*7*5*8 -> 2*14*10*8 align_corners True
@@ -546,6 +546,6 @@ TEST_F(TestResizeNearestNeighborFp32, ResizeNearestNeighborTest16) {
auto ret = kernel_->Run();
EXPECT_EQ(0, ret);

- CompareOutputData(output_data, expect.data(), output_size, err_tol);
+ ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol));
}
} // namespace mindspore

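The change repeated across these resize tests is the same one applied throughout the commit: the comparison helper's return value is now checked through a gtest assertion instead of being discarded. A minimal sketch of the pattern, assuming the relocated helper keeps the signature CompareOutputData(actual, expected, num, tolerance) and returns 0 when every element is within tolerance:

    // Hedged sketch only; fixture and helper availability assumed from this commit.
    TEST_F(TestResizeNearestNeighborFp32, AssertPatternSketch) {
      float output_data[2] = {1.0f, 2.0f};
      std::vector<float> expect = {1.0f, 2.0f};
      // Before: a mismatch only printed a message; wrapping the call in ASSERT_EQ
      // makes a non-zero return value fail the test at this line.
      ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), 2, 0.000001));
    }
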
@@ -26,7 +26,8 @@ class TestROIPoolingFp32 : public mindspore::CommonTest {
};

int ROIPoolingTestInit(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_, float *a_ptr,
- float *b_ptr, std::vector<int> a_shape, std::vector<int> b_shape, std::vector<int> c_shape) {
+ float *b_ptr, const std::vector<int> &a_shape, const std::vector<int> &b_shape,
+ const std::vector<int> &c_shape) {
auto in_t = new lite::Tensor(kNumberTypeFloat, a_shape, schema::Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
in_t->MallocData();
memcpy(in_t->MutableData(), a_ptr, sizeof(float) * in_t->ElementsNum());
@@ -61,15 +62,14 @@ TEST_F(TestROIPoolingFp32, Simple) {
auto ctx = new lite::InnerContext;
ctx->thread_num_ = 3;
ASSERT_EQ(lite::RET_OK, ctx->Init());
- kernel::ROIPoolingCPUKernel *op =
-   new kernel::ROIPoolingCPUKernel(reinterpret_cast<OpParameter *>(param), inputs_, outputs_, ctx, nullptr);
+ auto *op = new kernel::ROIPoolingCPUKernel(reinterpret_cast<OpParameter *>(param), inputs_, outputs_, ctx, nullptr);
op->Init();
op->Run();
float correct[] = {25, 31, 34, 35, 25, 31, 34, 35};
float *output = reinterpret_cast<float *>(outputs_[0]->MutableData());
for (int i = 0; i < 8; ++i) printf("%f ", output[i]);
printf("\n");
- CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001);
+ ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(outputs_[0]->MutableData()), correct, total_size, 0.0001));
delete op;
for (auto t : inputs_) delete t;
for (auto t : outputs_) delete t;

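The ROIPoolingTestInit hunk above also switches the shape parameters from pass-by-value std::vector<int> to const references, which avoids copying each shape vector on every call. A generic illustration of that idiom (the helper name here is hypothetical, not from the diff):

    #include <vector>
    // Hypothetical helper for illustration: reads the shape through a const
    // reference, so the caller's vector is never copied or modified.
    int ElementCount(const std::vector<int> &shape) {
      int total = 1;
      for (int d : shape) total *= d;
      return total;
    }
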
@@ -112,7 +112,7 @@ TEST_F(TestScaleFp32, ScaleNoAct) {

std::vector<float> expect{1.0, 3.0, 7.0, 4.0, 9.0, 16.0, 7.0, 15.0, 25.0, 10.0, 21.0, 34.0};

- CompareOutputData(out_data, expect.data(), 12, err_tol);
+ ASSERT_EQ(0, CompareOutputData(out_data, expect.data(), 12, err_tol));
}

TEST_F(TestScaleFp32, ScaleRelu) {
@@ -134,7 +134,7 @@ TEST_F(TestScaleFp32, ScaleRelu) {

std::vector<float> expect{0.0, 0.0, 1.0, 0.0, 3.0, 10.0, 1.0, 9.0, 19.0, 4.0, 15.0, 28.0};

- CompareOutputData(out_data, expect.data(), 12, err_tol);
+ ASSERT_EQ(0, CompareOutputData(out_data, expect.data(), 12, err_tol));
}
TEST_F(TestScaleFp32, ScaleRelu6) {
std::vector<int> input_shape{1, 2, 2, 3};
@@ -155,6 +155,6 @@ TEST_F(TestScaleFp32, ScaleRelu6) {

std::vector<float> expect{0.0, 0.0, 1.0, 0.0, 3.0, 6.0, 1.0, 6.0, 6.0, 4.0, 6.0, 6.0};

- CompareOutputData(out_data, expect.data(), 12, err_tol);
+ ASSERT_EQ(0, CompareOutputData(out_data, expect.data(), 12, err_tol));
}
} // namespace mindspore

@@ -39,11 +39,11 @@ TEST_F(SpaceToBatchTestFp32, SpaceToBatchTest4) {
param.block_sizes_[0] = 2;
param.block_sizes_[1] = 1;
DoSpaceToBatchNHWC(input.data(), out, param.block_sizes_, in_shape.data(), out_shape.data());
- for (unsigned int i = 0; i < kOutSize; ++i) {
-   std::cout << out[i] << " ";
+ for (float i : out) {
+   std::cout << i << " ";
}
std::cout << "\n";
- CompareOutputData(out, expect_out.data(), kOutSize, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(out, expect_out.data(), kOutSize, 0.000001));
}

TEST_F(SpaceToBatchTestFp32, SpaceToBatchTest5) {
@@ -61,7 +61,7 @@ TEST_F(SpaceToBatchTestFp32, SpaceToBatchTest5) {
std::cout << out[i] << " ";
}
std::cout << "\n";
- CompareOutputData(out, expect_out.data(), kOutSize, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(out, expect_out.data(), kOutSize, 0.000001));
}

TEST_F(SpaceToBatchTestFp32, SpaceToBatchTest6) {
@@ -79,7 +79,7 @@ TEST_F(SpaceToBatchTestFp32, SpaceToBatchTest6) {
std::cout << out[i] << " ";
}
std::cout << "\n";
- CompareOutputData(out, expect_out.data(), kOutSize, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(out, expect_out.data(), kOutSize, 0.000001));
}

TEST_F(SpaceToBatchTestFp32, SpaceToBatchTest7) {
@@ -101,7 +101,7 @@ TEST_F(SpaceToBatchTestFp32, SpaceToBatchTest7) {
std::cout << out[i] << " ";
}
std::cout << "\n";
- CompareOutputData(out, expect_out.data(), kOutSize, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(out, expect_out.data(), kOutSize, 0.000001));
}

TEST_F(SpaceToBatchTestFp32, SpaceToBatchTest8) {
@@ -120,7 +120,7 @@ TEST_F(SpaceToBatchTestFp32, SpaceToBatchTest8) {
std::cout << out[i] << " ";
}
std::cout << "\n";
- CompareOutputData(out, expect_out.data(), kOutSize, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(out, expect_out.data(), kOutSize, 0.000001));
}

TEST_F(SpaceToBatchTestFp32, SpaceToBatchTest9) {
@@ -140,7 +140,7 @@ TEST_F(SpaceToBatchTestFp32, SpaceToBatchTest9) {
std::cout << out[i] << " ";
}
std::cout << "\n";
- CompareOutputData(out, expect_out.data(), kOutSize, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(out, expect_out.data(), kOutSize, 0.000001));
}

TEST_F(SpaceToBatchTestFp32, SpaceToBatchTest10) {
@@ -166,6 +166,6 @@ TEST_F(SpaceToBatchTestFp32, SpaceToBatchTest10) {
std::cout << out[i] << " ";
}
std::cout << "\n";
- CompareOutputData(out, expect_out.data(), kOutSize, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(out, expect_out.data(), kOutSize, 0.000001));
}
} // namespace mindspore

@@ -44,7 +44,7 @@ TEST_F(SpaceToDepthTestFp32, SpaceToDepthTest1) {
std::cout << output[i] << " ";
}
std::cout << "\n";
- CompareOutputData(output, expect_out, out_size, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(output, expect_out, out_size, 0.000001));
}

TEST_F(SpaceToDepthTestFp32, SpaceToDepthTest2) {
@@ -89,7 +89,7 @@ TEST_F(SpaceToDepthTestFp32, SpaceToDepthTest2) {
std::cout << output[i] << " ";
}
std::cout << "\n";
- CompareOutputData(output.data(), expect_out, out_size, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(output.data(), expect_out, out_size, 0.000001));
}

} // namespace mindspore

@@ -99,7 +99,7 @@ TEST_F(TestSparseToDenseFp32, SparseToDense_test1) {
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1};
PrintData("output data", output, output_size);
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
- CompareOutputData(output, except_result.data(), output_size, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(output, except_result.data(), output_size, 0.000001));

input_tensor1->set_data(nullptr);
input_tensor2->set_data(nullptr);
@@ -183,7 +183,7 @@ TEST_F(TestSparseToDenseFp32, SparseToDense_test2) {
0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6};
PrintData("output data", output, output_size);
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
- CompareOutputData(output, except_result.data(), output_size, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(output, except_result.data(), output_size, 0.000001));

input_tensor1->set_data(nullptr);
input_tensor2->set_data(nullptr);
@@ -265,7 +265,7 @@ TEST_F(TestSparseToDenseFp32, SparseToDense_test3) {
std::vector<float> except_result = {0, 1, 0, 1, 1, 0, 0, 0, 0, 0};
PrintData("output data", output, output_size);
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
- CompareOutputData(output, except_result.data(), output_size, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(output, except_result.data(), output_size, 0.000001));

input_tensor1->set_data(nullptr);
input_tensor2->set_data(nullptr);
@@ -347,7 +347,7 @@ TEST_F(TestSparseToDenseFp32, SparseToDense_test4) {
std::vector<float> except_result = {0, 0, 0, 0, 0, 1, 0, 0, 0, 0};
PrintData("output data", output, output_size);
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
- CompareOutputData(output, except_result.data(), output_size, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(output, except_result.data(), output_size, 0.000001));

input_tensor1->set_data(nullptr);
input_tensor2->set_data(nullptr);
@@ -431,7 +431,7 @@ TEST_F(TestSparseToDenseFp32, SparseToDense_test5) {
0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6};
PrintData("output data", output, output_size);
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
- CompareOutputData(output, except_result.data(), output_size, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(output, except_result.data(), output_size, 0.000001));

input_tensor1->set_data(nullptr);
input_tensor2->set_data(nullptr);

@@ -36,11 +36,11 @@ TEST_F(StackTestFp32, StackTest1) {
float expect_out[kOutSize] = {1, 4, 7, 2, 5, 8, 3, 6, 9, 10, 40, 70, 20, 50, 80, 30, 60, 90};
float output[kOutSize];
DoStack(input, 3, shape.data(), shape.size(), axis, output);
- for (int i = 0; i < kOutSize; ++i) {
-   std::cout << output[i] << " ";
+ for (float i : output) {
+   std::cout << i << " ";
}
std::cout << "\n";
- CompareOutputData(output, expect_out, kOutSize, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(output, expect_out, kOutSize, 0.000001));
}

} // namespace mindspore

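The loop rewrites in the SpaceToBatch and Stack hunks above replace index-based loops over fixed-size float arrays with range-based for loops, which iterate the same elements without referring to a separate size constant. A small stand-alone illustration (array contents are arbitrary, not from the diff):

    #include <iostream>
    int main() {
      float out[4] = {1.0f, 2.0f, 3.0f, 4.0f};
      // Equivalent to: for (unsigned int i = 0; i < 4; ++i) std::cout << out[i] << " ";
      for (float v : out) {
        std::cout << v << " ";
      }
      std::cout << "\n";
      return 0;
    }
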
@@ -85,7 +85,7 @@ TEST_F(TestStridedSliceFp32, StridedSlice1) {
std::cout << correct[0] << " , " << correct[1];
std::cout << std::endl;

- CompareOutputData(output_data, correct, 2, 0.00001);
+ ASSERT_EQ(0, CompareOutputData(output_data, correct, 2, 0.00001));

delete strided_slice_param;
MS_LOG(INFO) << "Teststrided_sliceFp32 passed";
@@ -111,7 +111,7 @@ TEST_F(TestStridedSliceFp32, StridedSlice2) {
// runtime part
DoStridedSlice(input_data, output_data, strided_slice_param);

- CompareOutputData(output_data, correct, 9, 0.00001);
+ ASSERT_EQ(0, CompareOutputData(output_data, correct, 9, 0.00001));

delete strided_slice_param;
}
@@ -162,7 +162,7 @@ TEST_F(TestStridedSliceFp32, StridedSlice3) {
kernel->Run();
delete ctx;

- CompareOutputData(output_data, correct, 2, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(output_data, correct, 2, 0.000001));
input_tensor.set_data(nullptr);
output_tensor.set_data(nullptr);
}
@@ -199,7 +199,7 @@ TEST_F(TestStridedSliceFp32, StridedSlice4) {
output_tensor.set_data_type(input_tensor.data_type());
output_tensor.set_shape(output_shape);

- lite::InnerContext *ctx = new lite::InnerContext;
+ auto *ctx = new lite::InnerContext;
ctx->thread_num_ = 2;
ASSERT_EQ(lite::RET_OK, ctx->Init());
strided_slice_param->op_parameter_.type_ = schema::PrimitiveType_StridedSlice;
@@ -212,7 +212,7 @@ TEST_F(TestStridedSliceFp32, StridedSlice4) {
kernel->Run();
delete ctx;

- CompareOutputData(output_data, correct, 4, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(output_data, correct, 4, 0.000001));
input_tensor.set_data(nullptr);
output_tensor.set_data(nullptr);
}
@@ -256,7 +256,7 @@ TEST_F(TestStridedSliceFp32, StridedSlice5) {
output_tensor.set_data_type(input_tensor.data_type());
output_tensor.set_shape(output_shape);

- lite::InnerContext *ctx = new lite::InnerContext;
+ auto *ctx = new lite::InnerContext;
ctx->thread_num_ = 2;
ASSERT_EQ(lite::RET_OK, ctx->Init());
strided_slice_param->op_parameter_.type_ = schema::PrimitiveType_StridedSlice;
@@ -269,7 +269,7 @@ TEST_F(TestStridedSliceFp32, StridedSlice5) {
kernel->Run();
delete ctx;

- CompareOutputData(output_data, correct, 12, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(output_data, correct, 12, 0.000001));
input_tensor.set_data(nullptr);
output_tensor.set_data(nullptr);
}
@@ -313,7 +313,7 @@ TEST_F(TestStridedSliceFp32, StridedSlice6) {
output_tensor.set_data_type(input_tensor.data_type());
output_tensor.set_shape(output_shape);

- lite::InnerContext *ctx = new lite::InnerContext;
+ auto *ctx = new lite::InnerContext;
ctx->thread_num_ = 2;
ASSERT_EQ(lite::RET_OK, ctx->Init());
strided_slice_param->op_parameter_.type_ = schema::PrimitiveType_StridedSlice;
@@ -326,7 +326,7 @@ TEST_F(TestStridedSliceFp32, StridedSlice6) {
kernel->Run();
delete ctx;

- CompareOutputData(output_data, correct, 8, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(output_data, correct, 8, 0.000001));
input_tensor.set_data(nullptr);
output_tensor.set_data(nullptr);
}
@@ -375,7 +375,7 @@ TEST_F(TestStridedSliceFp32, StridedSlice7) {
kernel->Run();
delete ctx;

- CompareOutputData(output_data, correct, 1, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(output_data, correct, 1, 0.000001));
input_tensor.set_data(nullptr);
output_tensor.set_data(nullptr);
}
@@ -432,7 +432,7 @@ TEST_F(TestStridedSliceFp32, StridedSlice8) {
kernel->Run();
delete ctx;

- CompareOutputData(output_data, correct, 5, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(output_data, correct, 5, 0.000001));
input_tensor.set_data(nullptr);
output_tensor.set_data(nullptr);
}
@@ -582,7 +582,7 @@ TEST_F(TestStridedSliceFp32, StridedSlice9) {
kernel->Run();
delete ctx;

- CompareOutputData(output_data, correct, 490, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(output_data, correct, 490, 0.000001));
input_tensor.set_data(nullptr);
output_tensor.set_data(nullptr);
}

@@ -67,7 +67,7 @@ TEST_F(TestTransposeFp32, TransposeFp32_axes4) {
auto ret = DoTransposeFp32(in, out, input_shape, output_shape, param, 0, 3, nullptr, nullptr);
ASSERT_EQ(ret, 0);
delete param;
- CompareOutputData(out, correct, 24, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(out, correct, 24, 0.000001));
}

TEST_F(TestTransposeFp32, TransposeFp32_axes3) {
@@ -107,7 +107,7 @@ TEST_F(TestTransposeFp32, TransposeFp32_axes3) {
auto ret = DoTransposeFp32(in, out, input_shape, output_shape, param, 0, 3, nullptr, nullptr);
ASSERT_EQ(ret, 0);
delete param;
- CompareOutputData(out, correct, 24, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(out, correct, 24, 0.000001));
}

TEST_F(TestTransposeFp32, TransposeFp32_axes2) {
@@ -148,7 +148,7 @@ TEST_F(TestTransposeFp32, TransposeFp32_axes2) {
auto ret = DoTransposeFp32(in, out, input_shape, output_shape, param, 0, 6, nullptr, nullptr);
ASSERT_EQ(ret, 0);
delete param;
- CompareOutputData(out, correct, 24, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(out, correct, 24, 0.000001));
}

TEST_F(TestTransposeFp32, TransposeFp32_test5) {
@@ -213,7 +213,7 @@ TEST_F(TestTransposeFp32, TransposeFp32_test5) {
std::cout << output[i] << " ";
}
std::cout << "\n";
- CompareOutputData(output.data(), correct, 24, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(output.data(), correct, 24, 0.000001));
input_tensor.set_data(nullptr);
output_tensor.set_data(nullptr);
}

@@ -21,7 +21,6 @@
#include "src/common/log_adapter.h"
#include "common/common_test.h"
#include "src/common/file_utils.h"
- #include "src/common/file_utils_ext.h"
#include "mindspore/lite/src/kernel_registry.h"
#include "mindspore/lite/src/tensor.h"
#include "mindspore/lite/src/lite_kernel.h"
@@ -71,7 +70,7 @@ TEST_F(TestActGradFp32, ReluGradFp32) {

std::string output_path = "./test_data/activationGrad/relu_out_50.bin";

- int res = lite::CompareRelativeOutput(output_data, output_path);
+ int res = CompareRelativeOutput(output_data, output_path);

EXPECT_EQ(res, 0);

@@ -117,7 +116,7 @@ TEST_F(TestActGradFp32, Relu6GradFp32) {
std::cout << std::endl;

std::string output_path = "./test_data/activationGrad/relu6_out_50.bin";
- int res = lite::CompareRelativeOutput(output_data, output_path);
+ int res = CompareRelativeOutput(output_data, output_path);

EXPECT_EQ(res, 0);

@@ -163,7 +162,7 @@ TEST_F(TestActGradFp32, LReluGradFp32) {
std::cout << std::endl;

std::string output_path = "./test_data/activationGrad/lrelu_out_50.bin";
- int res = lite::CompareRelativeOutput(output_data, output_path);
+ int res = CompareRelativeOutput(output_data, output_path);

EXPECT_EQ(res, 0);

@@ -209,10 +208,10 @@ TEST_F(TestActGradFp32, SigmoidGradFp32) {
std::cout << std::endl;

std::string output_path = "./test_data/activationGrad/sigmoid_out_50.bin";
- int res = lite::CompareRelativeOutput(output_data, output_path);
+ int res = CompareRelativeOutput(output_data, output_path);

EXPECT_EQ(res, 0);
- // lite::CompareOutput(output_data, output_data_size, output_path);
+ // CompareOutput(output_data, output_data_size, output_path);

delete[] input_data;
delete[] output_data;
@@ -256,7 +255,7 @@ TEST_F(TestActGradFp32, tanhGradFp32) {
std::cout << std::endl;

std::string output_path = "./test_data/activationGrad/tanh_out_50.bin";
- int res = lite::CompareRelativeOutput(output_data, output_path);
+ int res = CompareRelativeOutput(output_data, output_path);

EXPECT_EQ(res, 0);

@@ -303,7 +302,7 @@ TEST_F(TestActGradFp32, hswishGradFp32) {
std::cout << std::endl;

std::string output_path = "./test_data/activationGrad/hswish_out_50.bin";
- int res = lite::CompareRelativeOutput(output_data, output_path);
+ int res = CompareRelativeOutput(output_data, output_path);

EXPECT_EQ(res, 0);

@@ -312,5 +311,4 @@ TEST_F(TestActGradFp32, hswishGradFp32) {
delete[] yt_data;
MS_LOG(INFO) << "hswishGradFp32 passed";
}

} // namespace mindspore

@@ -19,7 +19,6 @@
#include "src/common/log_adapter.h"
#include "common/common_test.h"
#include "src/common/file_utils.h"
- #include "src/common/file_utils_ext.h"
#include "nnacl/fp32/reduce.h"
#include "src/runtime/kernel/arm/fp32_grad/arithmetic_grad.h"
#include "src/kernel_registry.h"
@@ -129,10 +128,10 @@ TEST_F(TestArithmeticGradFp32, TestAddGradFp32) {
std::cout << std::endl;

std::string output_path = "./test_data/operators/arithmetic_fp32_1_dx1_4_6.bin";
- EXPECT_EQ(0, lite::CompareRelativeOutput(reinterpret_cast<float *>(outputs[0]->MutableData()), output_path));
+ EXPECT_EQ(0, CompareRelativeOutput(reinterpret_cast<float *>(outputs[0]->MutableData()), output_path));

std::string dx2_path = "./test_data/operators/arithmetic_fp32_1_dx2_1_6.bin";
- EXPECT_EQ(0, lite::CompareRelativeOutput(output_ptr, dx2_path));
+ EXPECT_EQ(0, CompareRelativeOutput(output_ptr, dx2_path));
for (auto tensor : all_tensors) {
delete[] reinterpret_cast<float *>(tensor->MutableData());
tensor->set_data(nullptr);
@@ -167,10 +166,10 @@ TEST_F(TestArithmeticGradFp32, TestAddGrad2Fp32) {
std::cout << std::endl;

std::string output_path = "./test_data/operators/arithmetic_fp32_1_dx1_4_6.bin";
- EXPECT_EQ(0, lite::CompareRelativeOutput(reinterpret_cast<float *>(outputs[1]->MutableData()), output_path));
+ EXPECT_EQ(0, CompareRelativeOutput(reinterpret_cast<float *>(outputs[1]->MutableData()), output_path));

std::string dx2_path = "./test_data/operators/arithmetic_fp32_1_dx2_1_6.bin";
- EXPECT_EQ(0, lite::CompareRelativeOutput(output_ptr, dx2_path));
+ EXPECT_EQ(0, CompareRelativeOutput(output_ptr, dx2_path));
for (auto tensor : all_tensors) {
delete[] reinterpret_cast<float *>(tensor->MutableData());
tensor->set_data(nullptr);
@@ -207,10 +206,10 @@ TEST_F(TestArithmeticGradFp32, TestAddGrad3Fp32) {
std::cout << std::endl;

std::string output_path = "./test_data/operators/arithmetic_fp32_8_dx2_5_1_6.bin";
- EXPECT_EQ(0, lite::CompareRelativeOutput(reinterpret_cast<float *>(outputs[1]->MutableData()), output_path));
+ EXPECT_EQ(0, CompareRelativeOutput(reinterpret_cast<float *>(outputs[1]->MutableData()), output_path));

std::string dx2_path = "./test_data/operators/arithmetic_fp32_8_dx1_5_4_6.bin";
- EXPECT_EQ(0, lite::CompareRelativeOutput(output_ptr, dx2_path));
+ EXPECT_EQ(0, CompareRelativeOutput(output_ptr, dx2_path));

for (auto tensor : all_tensors) {
delete[] reinterpret_cast<float *>(tensor->MutableData());
@@ -248,10 +247,10 @@ TEST_F(TestArithmeticGradFp32, TestSubGradFp32) {
std::cout << std::endl;

std::string output_path = "./test_data/operators/arithmetic_fp32_2_dx1_4_6.bin";
- EXPECT_EQ(0, lite::CompareRelativeOutput(reinterpret_cast<float *>(outputs[0]->MutableData()), output_path));
+ EXPECT_EQ(0, CompareRelativeOutput(reinterpret_cast<float *>(outputs[0]->MutableData()), output_path));

std::string dx2_path = "./test_data/operators/arithmetic_fp32_2_dx2_1_6.bin";
- EXPECT_EQ(0, lite::CompareRelativeOutput(output_ptr, dx2_path));
+ EXPECT_EQ(0, CompareRelativeOutput(output_ptr, dx2_path));

for (auto tensor : all_tensors) {
delete[] reinterpret_cast<float *>(tensor->MutableData());
@@ -289,10 +288,10 @@ TEST_F(TestArithmeticGradFp32, TestSubGrad2Fp32) {
std::cout << std::endl;

std::string output_path = "./test_data/operators/arithmetic_fp32_3_dx1_4_6.bin";
- EXPECT_EQ(0, lite::CompareRelativeOutput(reinterpret_cast<float *>(outputs[1]->MutableData()), output_path));
+ EXPECT_EQ(0, CompareRelativeOutput(reinterpret_cast<float *>(outputs[1]->MutableData()), output_path));

std::string dx2_path = "./test_data/operators/arithmetic_fp32_3_dx2_1_6.bin";
- EXPECT_EQ(0, lite::CompareRelativeOutput(output_ptr, dx2_path));
+ EXPECT_EQ(0, CompareRelativeOutput(output_ptr, dx2_path));

for (auto tensor : all_tensors) {
delete[] reinterpret_cast<float *>(tensor->MutableData());
@@ -338,10 +337,10 @@ TEST_F(TestArithmeticGradFp32, TestMulGradFp32) {
std::cout << std::endl;

std::string output_path = "./test_data/operators/arithmetic_fp32_4_dx1_4_6.bin";
- EXPECT_EQ(0, lite::CompareRelativeOutput(reinterpret_cast<float *>(outputs[0]->MutableData()), output_path));
+ EXPECT_EQ(0, CompareRelativeOutput(reinterpret_cast<float *>(outputs[0]->MutableData()), output_path));

std::string dx2_path = "./test_data/operators/arithmetic_fp32_4_dx2_1_6.bin";
- EXPECT_EQ(0, lite::CompareRelativeOutput(output_ptr, dx2_path));
+ EXPECT_EQ(0, CompareRelativeOutput(output_ptr, dx2_path));
for (auto tensor : all_tensors) {
delete[] reinterpret_cast<float *>(tensor->MutableData());
tensor->set_data(nullptr);
@@ -377,10 +376,10 @@ TEST_F(TestArithmeticGradFp32, TestMulGrad2Fp32) {
std::cout << std::endl;

std::string output_path = "./test_data/operators/arithmetic_fp32_4_dx1_4_6.bin";
- EXPECT_EQ(0, lite::CompareRelativeOutput(reinterpret_cast<float *>(outputs[1]->MutableData()), output_path));
+ EXPECT_EQ(0, CompareRelativeOutput(reinterpret_cast<float *>(outputs[1]->MutableData()), output_path));

std::string dx2_path = "./test_data/operators/arithmetic_fp32_4_dx2_1_6.bin";
- EXPECT_EQ(0, lite::CompareRelativeOutput(output_ptr, dx2_path));
+ EXPECT_EQ(0, CompareRelativeOutput(output_ptr, dx2_path));
for (auto tensor : all_tensors) {
delete[] reinterpret_cast<float *>(tensor->MutableData());
tensor->set_data(nullptr);
@@ -417,10 +416,10 @@ TEST_F(TestArithmeticGradFp32, TestMulGrad3Fp32) {
std::cout << std::endl;

std::string output_path = "./test_data/operators/arithmetic_fp32_9_dx1_5_4_6.bin";
- EXPECT_EQ(0, lite::CompareRelativeOutput(reinterpret_cast<float *>(outputs[0]->MutableData()), output_path));
+ EXPECT_EQ(0, CompareRelativeOutput(reinterpret_cast<float *>(outputs[0]->MutableData()), output_path));

std::string dx2_path = "./test_data/operators/arithmetic_fp32_9_dx2_5_1_6.bin";
- EXPECT_EQ(0, lite::CompareRelativeOutput(output_ptr, dx2_path));
+ EXPECT_EQ(0, CompareRelativeOutput(output_ptr, dx2_path));
for (auto tensor : all_tensors) {
delete[] reinterpret_cast<float *>(tensor->MutableData());
tensor->set_data(nullptr);
@@ -457,10 +456,10 @@ TEST_F(TestArithmeticGradFp32, TestMulGrad4Fp32) {
std::cout << std::endl;

std::string output_path = "./test_data/operators/arithmetic_fp32_9_dx1_5_4_6.bin";
- EXPECT_EQ(0, lite::CompareRelativeOutput(reinterpret_cast<float *>(outputs[1]->MutableData()), output_path));
+ EXPECT_EQ(0, CompareRelativeOutput(reinterpret_cast<float *>(outputs[1]->MutableData()), output_path));

std::string dx2_path = "./test_data/operators/arithmetic_fp32_9_dx2_5_1_6.bin";
- EXPECT_EQ(0, lite::CompareRelativeOutput(output_ptr, dx2_path));
+ EXPECT_EQ(0, CompareRelativeOutput(output_ptr, dx2_path));
for (auto tensor : all_tensors) {
delete[] reinterpret_cast<float *>(tensor->MutableData());
tensor->set_data(nullptr);
@@ -497,10 +496,10 @@ TEST_F(TestArithmeticGradFp32, TestDivGradFp32) {
std::cout << std::endl;

std::string output_path = "./test_data/operators/arithmetic_fp32_5_dx1_4_6.bin";
- EXPECT_EQ(0, lite::CompareRelativeOutput(reinterpret_cast<float *>(outputs[0]->MutableData()), output_path));
+ EXPECT_EQ(0, CompareRelativeOutput(reinterpret_cast<float *>(outputs[0]->MutableData()), output_path));

std::string dx2_path = "./test_data/operators/arithmetic_fp32_5_dx2_1_6.bin";
- EXPECT_EQ(0, lite::CompareRelativeOutput(output_ptr, dx2_path));
+ EXPECT_EQ(0, CompareRelativeOutput(output_ptr, dx2_path));
for (auto tensor : all_tensors) {
delete[] reinterpret_cast<float *>(tensor->MutableData());
tensor->set_data(nullptr);
@@ -537,10 +536,10 @@ TEST_F(TestArithmeticGradFp32, TestDivGrad2Fp32) {
std::cout << std::endl;

std::string dx2_path = "./test_data/operators/arithmetic_fp32_6_dx2_4_6.bin";
- EXPECT_EQ(0, lite::CompareRelativeOutput(reinterpret_cast<float *>(outputs[1]->MutableData()), dx2_path));
+ EXPECT_EQ(0, CompareRelativeOutput(reinterpret_cast<float *>(outputs[1]->MutableData()), dx2_path));

std::string output_path = "./test_data/operators/arithmetic_fp32_6_dx1_1_6.bin";
- EXPECT_EQ(0, lite::CompareRelativeOutput(output_ptr, output_path));
+ EXPECT_EQ(0, CompareRelativeOutput(output_ptr, output_path));

for (auto tensor : all_tensors) {
delete[] reinterpret_cast<float *>(tensor->MutableData());
@@ -578,10 +577,10 @@ TEST_F(TestArithmeticGradFp32, TestDivGrad3Fp32) {
std::cout << std::endl;

std::string dx1_path = "./test_data/operators/arithmetic_fp32_10_dx1_5_4_6.bin";
- EXPECT_EQ(0, lite::CompareRelativeOutput(reinterpret_cast<float *>(outputs[0]->MutableData()), dx1_path));
+ EXPECT_EQ(0, CompareRelativeOutput(reinterpret_cast<float *>(outputs[0]->MutableData()), dx1_path));

std::string output_path = "./test_data/operators/arithmetic_fp32_10_dx2_5_1_6.bin";
- EXPECT_EQ(0, lite::CompareRelativeOutput(output_ptr, output_path));
+ EXPECT_EQ(0, CompareRelativeOutput(output_ptr, output_path));
for (auto tensor : all_tensors) {
delete[] reinterpret_cast<float *>(tensor->MutableData());
tensor->set_data(nullptr);
@@ -618,10 +617,10 @@ TEST_F(TestArithmeticGradFp32, Test3DDivGrad2Fp32) {
std::cout << std::endl;

std::string dx1_path = "./test_data/operators/arithmetic_fp32_7_dx1_4_5_6.bin";
- EXPECT_EQ(0, lite::CompareRelativeOutput(reinterpret_cast<float *>(outputs[0]->MutableData()), dx1_path));
+ EXPECT_EQ(0, CompareRelativeOutput(reinterpret_cast<float *>(outputs[0]->MutableData()), dx1_path));

std::string output_path = "./test_data/operators/arithmetic_fp32_7_dx2_1_1_6.bin";
- EXPECT_EQ(0, lite::CompareRelativeOutput(output_ptr, output_path));
+ EXPECT_EQ(0, CompareRelativeOutput(output_ptr, output_path));
for (auto tensor : all_tensors) {
delete[] reinterpret_cast<float *>(tensor->MutableData());
tensor->set_data(nullptr);

@@ -61,7 +61,7 @@ TEST_F(TestBiasGradFp32, BiasGradFp32) {
}
std::cout << std::endl;
std::string output_path = "./test_data/operators/biasgradfp32_1_db_7.bin";
- lite::CompareOutput(output_data, 7, output_path);
+ CompareOutput(output_data, 7, output_path);

delete[] input_data;
delete[] output_data;

@@ -18,7 +18,6 @@
#include "src/common/log_adapter.h"
#include "common/common_test.h"
#include "src/common/file_utils.h"
- #include "src/common/file_utils_ext.h"
#include "src/runtime/kernel/arm/fp32_grad/bn_grad.h"
#include "nnacl/fp32_grad/batch_norm.h"
#include "nnacl/fp32/batchnorm.h"
@@ -93,18 +92,18 @@ TEST_F(TestBNGradFp32, BNGradFp32) {
auto dx = reinterpret_cast<float *>(outputs[0]->MutableData());
for (int i = 0; i < 7; i++) std::cout << dx[i] << " ";
std::cout << "\n";
- auto res = mindspore::lite::CompareRelativeOutput(dx, "./test_data/bngrad/output_dx_2_4_5_3.bin");
+ auto res = CompareRelativeOutput(dx, "./test_data/bngrad/output_dx_2_4_5_3.bin");
std::cout << "\n=======dscale=======\n";
auto dscale = reinterpret_cast<float *>(outputs[1]->MutableData());
for (int i = 0; i < channels; i++) std::cout << dscale[i] << " ";
std::cout << "\n";
- res = mindspore::lite::CompareRelativeOutput(dscale, "./test_data/bngrad/output_dscale_3.bin");
+ res = CompareRelativeOutput(dscale, "./test_data/bngrad/output_dscale_3.bin");
EXPECT_EQ(res, 0);
std::cout << "==========dbias==========\n";
auto dbias = reinterpret_cast<float *>(outputs[2]->MutableData());
for (int i = 0; i < 3; i++) std::cout << dbias[i] << " ";
std::cout << "\n";
- res = mindspore::lite::CompareRelativeOutput(dbias, "./test_data/bngrad/output_dbias_3.bin");
+ res = CompareRelativeOutput(dbias, "./test_data/bngrad/output_dbias_3.bin");
EXPECT_EQ(res, 0);
for (auto v : inputs) {
delete[] reinterpret_cast<float *>(v->MutableData());
@@ -192,9 +191,9 @@ TEST_F(TestBNGradFp32, BNTtrainFp32) {
for (int i = 0; i < channels; i++) std::cout << save_var[i] << " ";
std::cout << "\n";
delete[] reinterpret_cast<float *>(x_tensor->MutableData());
- auto res = mindspore::lite::CompareRelativeOutput(save_mean, "./test_data/bngrad/running_mean_3.bin");
+ auto res = CompareRelativeOutput(save_mean, "./test_data/bngrad/running_mean_3.bin");
EXPECT_EQ(res, 0);
- res = mindspore::lite::CompareRelativeOutput(save_var, "./test_data/bngrad/running_var_3.bin");
+ res = CompareRelativeOutput(save_var, "./test_data/bngrad/running_var_3.bin");
EXPECT_EQ(res, 0);

x_tensor->set_data(nullptr);

@@ -20,7 +20,6 @@
#include "src/common/log_adapter.h"
#include "common/common_test.h"
#include "src/common/file_utils.h"
- #include "src/common/file_utils_ext.h"
#include "mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution.h"
#include "mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.h"
#include "mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.h"
@@ -131,7 +130,7 @@ TEST_F(TestConvolutionGradFp32, ConvFp32FilterGrad) {
printf("single thread running time : %f ms\n", time_avg / 1000.0f);

std::string output_path = "./test_data/conv/convfp32_dw_32_3_3_3.bin";
- auto res = lite::CompareRelativeOutput(dw_data, output_path);
+ auto res = CompareRelativeOutput(dw_data, output_path);

EXPECT_EQ(res, 0);

@@ -205,7 +204,7 @@ TEST_F(TestConvolutionGradFp32, ConvFp32InputGrad) {
printf("single thread running time : %f ms\n", time_avg / 1000.0f);

std::string output_path = "./test_data/conv/convfp32_dx_1_28_28_3.bin";
- auto res = lite::CompareRelativeOutput(dx_data, output_path);
+ auto res = CompareRelativeOutput(dx_data, output_path);
EXPECT_EQ(res, 0);
delete[] dx_data;
delete[] w_data;
@@ -276,7 +275,7 @@ TEST_F(TestConvolutionGradFp32, ConvFp32GroupFilterGrad) {
printf("single thread running time : %f ms\n", time_avg / 1000.0f);

std::string output_path = "./test_data/conv/convfp32_dw_g3_18_3_3_3.bin";
- auto res = lite::CompareRelativeOutput(dw_data, output_path);
+ auto res = CompareRelativeOutput(dw_data, output_path);
EXPECT_EQ(res, 0);

delete[] input_data;
@@ -348,7 +347,7 @@ TEST_F(TestConvolutionGradFp32, ConvFp32GroupInputGrad) {
printf("single thread running time : %f ms\n", time_avg / 1000.0f);

std::string output_path = "./test_data/conv/convfp32_dx_g3_1_28_28_3.bin";
- auto res = lite::CompareRelativeOutput(dx_data, output_path);
+ auto res = CompareRelativeOutput(dx_data, output_path);
EXPECT_EQ(res, 0);
delete[] dx_data;
delete[] w_data;
@@ -421,7 +420,7 @@ TEST_F(TestConvolutionGradFp32, ConvFp32GroupDilationFilterGrad) {
printf("single thread running time : %f ms\n", time_avg / 1000.0f);

std::string output_path = "./test_data/conv/convfp32_dw_g3_d2_18_3_3_3.bin";
- auto res = lite::CompareRelativeOutput(dw_data, output_path);
+ auto res = CompareRelativeOutput(dw_data, output_path);
EXPECT_EQ(res, 0);
delete[] input_data;
delete[] dy_data;
@@ -488,7 +487,7 @@ TEST_F(TestConvolutionGradFp32, ConvFp32GroupDilationInputGrad) {
printf("single thread running time : %f ms\n", time_avg / 1000.0f);

std::string output_path = "./test_data/conv/convfp32_dx_g3_d2_1_28_28_3.bin";
- auto res = lite::CompareRelativeOutput(dx_data, output_path);
+ auto res = CompareRelativeOutput(dx_data, output_path);
EXPECT_EQ(res, 0);
delete[] dx_data;
delete[] w_data;
@@ -563,7 +562,7 @@ TEST_F(TestConvolutionGradFp32, ConvGroupDilation) {
printf("single thread running time : %f ms\n", time_avg / 1000.0f);

std::string output_path = "./test_data/conv/convfp32_y_g3_d2_1_26_26_18.bin";
- auto res = lite::CompareRelativeOutput(y_data, output_path);
+ auto res = CompareRelativeOutput(y_data, output_path);
EXPECT_EQ(res, 0);

delete[] y_data;
@@ -661,7 +660,7 @@ TEST_F(TestConvolutionGradFp32, ConvFp32Dilation2Group2Stride2FilterGrad) {
printf("single thread running time : %f ms\n", time_avg / 1000.0f);

std::string output_path = "./test_data/conv/convfp32_dw_d2_g2_s2_12_2_3_3.bin";
- auto res = lite::CompareRelativeOutput(dw_data, output_path);
+ auto res = CompareRelativeOutput(dw_data, output_path);

EXPECT_EQ(res, 0);

@@ -761,7 +760,7 @@ TEST_F(TestConvolutionGradFp32, ConvGroup2Dilation2Stride2) {
printf("single thread running time : %f ms\n", time_avg / 1000.0f);

std::string output_path = "./test_data/conv/convfp32_inputdx_d2_g2_s2_2_4_32_32.bin";
- auto res = lite::CompareRelativeOutput(dx_data, output_path);
+ auto res = CompareRelativeOutput(dx_data, output_path);
EXPECT_EQ(res, 0);
delete[] dx_data;
delete[] w_data;

@@ -17,10 +17,8 @@
#include <iostream>
#include <memory>
#include <vector>
- // #include "utils/log_adapter.h"
#include "common/common_test.h"
#include "src/common/file_utils.h"
- #include "src/common/file_utils_ext.h"
#include "mindspore/lite/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_filter.h"
#include "mindspore/lite/nnacl/conv_parameter.h"
#include "mindspore/lite/src/kernel_registry.h"
@@ -114,7 +112,7 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32FilterGrad) {
printf("single thread running time : %f ms\n", time_avg / 1000.0f);

std::string output_path = "./test_data/deconv/deconvfp32_dw_9_3_3_3.bin";
- auto res = lite::CompareRelativeOutput(dw_data, output_path);
+ auto res = CompareRelativeOutput(dw_data, output_path);

EXPECT_EQ(res, 0);

@@ -213,7 +211,7 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2FilterGrad) {
printf("single thread running time : %f ms\n", time_avg / 1000.0f);

std::string output_path = "./test_data/deconv/deconvfp32_dw_d2_9_3_3_3.bin";
- auto res = lite::CompareRelativeOutput(dw_data, output_path);
+ auto res = CompareRelativeOutput(dw_data, output_path);

EXPECT_EQ(res, 0);

@@ -312,7 +310,7 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2Group3FilterGrad) {
printf("single thread running time : %f ms\n", time_avg / 1000.0f);

std::string output_path = "./test_data/deconv/deconvfp32_dw_d2_g3_3_3_3_3.bin";
- auto res = lite::CompareRelativeOutput(dw_data, output_path);
+ auto res = CompareRelativeOutput(dw_data, output_path);

EXPECT_EQ(res, 0);

@@ -411,7 +409,7 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2Group3Stride1FilterGrad) {
printf("single thread running time : %f ms\n", time_avg / 1000.0f);

std::string output_path = "./test_data/deconv/deconvfp32_dw_d2_g3_s1_3_3_3_3.bin";
- auto res = lite::CompareRelativeOutput(dw_data, output_path);
+ auto res = CompareRelativeOutput(dw_data, output_path);

EXPECT_EQ(res, 0);

@@ -510,7 +508,7 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2Group2Stride2FilterGrad) {
printf("single thread running time : %f ms\n", time_avg / 1000.0f);

std::string output_path = "./test_data/deconv/deconvfp32_dw_d2_g2_s2_6_4_3_3.bin";
- auto res = lite::CompareRelativeOutput(dw_data, output_path);
+ auto res = CompareRelativeOutput(dw_data, output_path);

EXPECT_EQ(res, 0);

@@ -609,7 +607,7 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2Group12Stride2FilterGrad) {
printf("single thread running time : %f ms\n", time_avg / 1000.0f);

std::string output_path = "./test_data/deconv/deconvfp32_dw_d2_g12_s2_12_1_3_3.bin";
- auto res = lite::CompareRelativeOutput(dw_data, output_path);
+ auto res = CompareRelativeOutput(dw_data, output_path);

EXPECT_EQ(res, 0);

@@ -30,7 +30,6 @@
#include "include/errorcode.h"
#include "src/common/log_adapter.h"
#include "src/common/file_utils.h"
- #include "src/common/file_utils_ext.h"
#include "src/kernel_registry.h"
#include "src/runtime/kernel/arm/fp32_grad/convolution.h"

@@ -418,7 +417,7 @@ TEST_F(NetworkTest, tuning_layer) {
}
std::cout << std::endl;
std::string output_path = "./test_data/train/train_output_32_10.bin";
- auto error = lite::RelativeOutputError(outData, output_path);
+ auto error = RelativeOutputError(outData, output_path);
EXPECT_LT(error, 2e-3);

ret = session->RunGraph();
@@ -433,7 +432,7 @@ TEST_F(NetworkTest, tuning_layer) {
std::cout << outData[i] << ", ";
}
std::cout << std::endl;
- error = lite::RelativeOutputError(outData, output_path);
+ error = RelativeOutputError(outData, output_path);
EXPECT_LT(error, 2e-3);

session->Train();
@@ -449,7 +448,7 @@ TEST_F(NetworkTest, tuning_layer) {
std::cout << outData[i] << ", ";
}
std::cout << std::endl;
- error = lite::RelativeOutputError(outData, output_path);
+ error = RelativeOutputError(outData, output_path);
EXPECT_LT(error, 2e-3);

delete session;
@@ -502,7 +501,7 @@ int32_t runNet(mindspore::session::LiteSession *session, const std::string &in,
}
std::cout << std::endl;
}
- return mindspore::lite::CompareRelativeOutput(output_data, out);
+ return CommonTest::CompareRelativeOutput(output_data, out);
}

return lite::RET_ERROR;

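In runNet above the helper is now reached through CommonTest:: rather than mindspore::lite::, consistent with the commit moving the comparison functions out of src/common and into the test support code. Inside TEST_F bodies the unqualified calls resolve through the fixture's CommonTest base; a free function such as runNet has no fixture base, so it names the class explicitly. A hedged sketch of that distinction, assuming the relocated helpers are static or otherwise accessible members of mindspore::CommonTest:

    // Inside a fixture, unqualified lookup finds the base-class helper.
    // TEST_F(NetworkTest, Sketch) { EXPECT_EQ(0, CompareRelativeOutput(data, path)); }

    // A free helper outside any fixture must qualify the class name.
    int32_t CheckAgainstFile(float *data, const std::string &path) {
      return CommonTest::CompareRelativeOutput(data, path);  // assumed to be callable this way
    }
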
@@ -22,7 +22,6 @@
#include "common/common_test.h"
#include "src/common/utils.h"
#include "src/common/file_utils.h"
- #include "src/common/file_utils_ext.h"
#include "nnacl/fp32_grad/pooling_grad.h"
#include "src/runtime/kernel/arm/fp32_grad/pooling_grad.h"
#include "mindspore/lite/src/kernel_registry.h"
@@ -97,7 +96,7 @@ TEST_F(TestPoolingGradFp32, AvgPoolingGradFp32) {
}
std::cout << std::endl;
std::string output_path = "./test_data/pooling/avgpoolgradfp32_1_dx_1_28_28_3.bin";
- auto res = lite::CompareOutput(output_data, output_data_size, output_path);
+ auto res = CompareOutput(output_data, output_data_size, output_path);
EXPECT_EQ(res, 0);

delete[] input_data;
@@ -156,7 +155,7 @@ TEST_F(TestPoolingGradFp32, AvgPoolingKernelGradFp32) {
}
std::cout << std::endl;
std::string output_path = "./test_data/pooling/avgpoolgradfp32_1_dx_1_28_28_3.bin";
- auto res = lite::CompareOutput(output_data, output_data_size, output_path);
+ auto res = CompareOutput(output_data, output_data_size, output_path);
EXPECT_EQ(res, 0);

delete[] input_data;
@@ -221,7 +220,7 @@ TEST_F(TestPoolingGradFp32, AvgPoolingBatchGradFp32) {
std::cout << std::endl;
std::string output_path = "./test_data/pooling/avgpoolgradfp32_1_dx_3_28_28_3.bin";
size_t output_data_size = dx_tensor.ElementsNum();
- auto res = lite::CompareOutput(output_data, output_data_size, output_path);
+ auto res = CompareOutput(output_data, output_data_size, output_path);
EXPECT_EQ(res, 0);

delete[] input_data;
@@ -279,7 +278,7 @@ TEST_F(TestPoolingGradFp32, AvgPoolGradStride2Fp32) {
kernel->Run();

std::string output_path = "./test_data/pooling/avgpoolgradfp32_s2_dx_3_28_28_3.bin";
- auto res = lite::CompareRelativeOutput(out_data, output_path);
+ auto res = CompareRelativeOutput(out_data, output_path);
EXPECT_EQ(res, 0);

delete[] x_data;
@@ -340,7 +339,7 @@ TEST_F(TestPoolingGradFp32, AvgPoolGradStride3Fp32) {
kernel->Run();

std::string output_path = "./test_data/pooling/avgpoolgradfp32_s3_dx_3_28_28_3.bin";
- auto res = lite::CompareRelativeOutput(out_data, output_path);
+ auto res = CompareRelativeOutput(out_data, output_path);

EXPECT_EQ(res, 0);

@@ -398,7 +397,7 @@ TEST_F(TestPoolingGradFp32, MaxPoolingGradFp32) {
}
std::cout << std::endl;
std::string output_path = "./test_data/pooling/maxpoolgradfp32_1_xgrad_1_28_28_3.bin";
- auto res = lite::CompareOutput(output_data, output_data_size, output_path);
+ auto res = CompareOutput(output_data, output_data_size, output_path);
EXPECT_EQ(res, 0);

free(pooling_param);
@@ -458,7 +457,7 @@ TEST_F(TestPoolingGradFp32, MaxPoolGradBatchFp32) {
kernel->Run();

std::string output_path = "./test_data/pooling/maxpoolgradfp32_1_xgrad_3_28_28_3.bin";
- auto res = lite::CompareRelativeOutput(out_data, output_path);
+ auto res = CompareRelativeOutput(out_data, output_path);

EXPECT_EQ(res, 0);

@@ -529,7 +528,7 @@ TEST_F(TestPoolingGradFp32, MaxPoolGradStride2Fp32) {
kernel->Run();

std::string output_path = "./test_data/pooling/maxpoolgradfp32_s2_xgrad_3_28_28_3.bin";
- auto res = lite::CompareRelativeOutput(out_data, output_path);
+ auto res = CompareRelativeOutput(out_data, output_path);

EXPECT_EQ(res, 0);

@@ -600,7 +599,7 @@ TEST_F(TestPoolingGradFp32, MaxPoolGradStride3Fp32) {
kernel->Run();

std::string output_path = "./test_data/pooling/maxpoolgradfp32_s3_xgrad_3_28_28_3.bin";
- auto res = lite::CompareRelativeOutput(out_data, output_path);
+ auto res = CompareRelativeOutput(out_data, output_path);

EXPECT_EQ(res, 0);

@@ -76,7 +76,7 @@ TEST_F(TestSoftmaxCrossEntropyFp32, SoftmaxCrossEntropyFp32) {
printf("==================Testing Grad===============\n");

std::string output_path = "./test_data/operators/sce_fp32_1_loss_1.bin";
- lite::CompareOutput(loss, 1, output_path);
+ CompareOutput(loss, 1, output_path);

((mindspore::kernel::SparseSoftmaxCrossEntropyWithLogitsCPUKernel *)kernel_obj)->train();
kernel_obj->Run();
@@ -87,7 +87,7 @@ TEST_F(TestSoftmaxCrossEntropyFp32, SoftmaxCrossEntropyFp32) {
}
std::cout << std::endl;
std::string grad_path = "./test_data/operators/sce_fp32_1_dy_6_4.bin";
- lite::CompareOutput(grad, 24, grad_path);
+ CompareOutput(grad, 24, grad_path);

delete[] ll_labels;
delete[] labels;

@@ -23,7 +23,6 @@
#include "common/common_test.h"
#include "src/common/utils.h"
#include "src/common/file_utils.h"
- #include "src/common/file_utils_ext.h"
#include "mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_grad.h"
#include "mindspore/lite/nnacl/fp32_grad/softmax_grad.h"
#include "mindspore/lite/src/kernel_registry.h"
@@ -97,7 +96,7 @@ TEST_F(TestSoftmaxGradFp32, SoftmaxGradAxis0) {

std::string output_path = "./test_data/softmax/softmaxgrad_out.bin";

- auto res = lite::CompareRelativeOutput(out_data, output_path);
+ auto res = CompareRelativeOutput(out_data, output_path);
EXPECT_EQ(res, 0);

delete[] input_data;
@@ -156,7 +155,7 @@ TEST_F(TestSoftmaxGradFp32, SoftmaxGradAxis1) {
std::string output_path = "./test_data/softmax/softmaxgrad_1_out.bin";
// auto output_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));

- auto res = lite::CompareRelativeOutput(out_data, output_path);
+ auto res = CompareRelativeOutput(out_data, output_path);
EXPECT_EQ(res, 0);

delete[] input_data;
@@ -215,7 +214,7 @@ TEST_F(TestSoftmaxGradFp32, SoftmaxGradAxis2) {
std::string output_path = "./test_data/softmax/softmaxgrad_2_out.bin";
// auto output_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));

- auto res = lite::CompareRelativeOutput(out_data, output_path);
+ auto res = CompareRelativeOutput(out_data, output_path);
EXPECT_EQ(res, 0);

delete[] input_data;
@@ -274,7 +273,7 @@ TEST_F(TestSoftmaxGradFp32, SoftmaxGradAxis3) {
std::string output_path = "./test_data/softmax/softmaxgrad_3_out.bin";
// auto output_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));

- auto res = lite::CompareRelativeOutput(out_data, output_path);
+ auto res = CompareRelativeOutput(out_data, output_path);
EXPECT_EQ(res, 0);

delete[] input_data;
@@ -333,7 +332,7 @@ TEST_F(TestSoftmaxGradFp32, SoftmaxGradAxisMinus1) {
std::string output_path = "./test_data/softmax/softmaxgrad_-1_out.bin";
// auto output_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));

- auto res = lite::CompareRelativeOutput(out_data, output_path);
+ auto res = CompareRelativeOutput(out_data, output_path);
EXPECT_EQ(res, 0);

delete[] input_data;

@@ -80,7 +80,7 @@ TEST_F(TestArithmeticSelfInt8, floor_quant0_thread2) {
std::vector<int8_t> except_result = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
PrintData("output data", output, output_size);
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
- CompareOutputData(output, except_result.data(), output_size, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(output, except_result.data(), output_size, 0.000001));

input_tensor1->set_data(nullptr);
output0_tensor->set_data(nullptr);
@@ -140,7 +140,7 @@ TEST_F(TestArithmeticSelfInt8, floor_quant1_thread2) {
std::vector<int8_t> except_result = {0, 1, 1, 2, 3, 3, 3, 4, 5, 5, 5, 6};
PrintData("output data", output, output_size);
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
- CompareOutputData(output, except_result.data(), output_size, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(output, except_result.data(), output_size, 0.000001));

input_tensor1->set_data(nullptr);
output0_tensor->set_data(nullptr);
@@ -200,7 +200,7 @@ TEST_F(TestArithmeticSelfInt8, round_quant0_thread2) {
std::vector<int8_t> except_result = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
PrintData("output data", output, output_size);
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
- CompareOutputData(output, except_result.data(), output_size, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(output, except_result.data(), output_size, 0.000001));

input_tensor1->set_data(nullptr);
output0_tensor->set_data(nullptr);
@@ -260,7 +260,7 @@ TEST_F(TestArithmeticSelfInt8, round_quant1_thread2) {
std::vector<int8_t> except_result = {1, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 7};
PrintData("output data", output, output_size);
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
- CompareOutputData(output, except_result.data(), output_size, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(output, except_result.data(), output_size, 0.000001));

input_tensor1->set_data(nullptr);
output0_tensor->set_data(nullptr);
@@ -320,7 +320,7 @@ TEST_F(TestArithmeticSelfInt8, ceil_quant0_thread2) {
std::vector<int8_t> except_result = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
PrintData("output data", output, output_size);
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
- CompareOutputData(output, except_result.data(), output_size, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(output, except_result.data(), output_size, 0.000001));

input_tensor1->set_data(nullptr);
output0_tensor->set_data(nullptr);
@@ -380,7 +380,7 @@ TEST_F(TestArithmeticSelfInt8, ceil_quant1_thread2) {
std::vector<int8_t> except_result = {1, 1, 2, 3, 3, 3, 4, 5, 5, 5, 6, 7};
PrintData("output data", output, output_size);
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
- CompareOutputData(output, except_result.data(), output_size, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(output, except_result.data(), output_size, 0.000001));

input_tensor1->set_data(nullptr);
output0_tensor->set_data(nullptr);
@@ -440,7 +440,7 @@ TEST_F(TestArithmeticSelfInt8, abs_quant0_thread0) {
std::vector<int8_t> except_result = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
PrintData("output data", output, output_size);
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
- CompareOutputData(output, except_result.data(), output_size, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(output, except_result.data(), output_size, 0.000001));

input_tensor1->set_data(nullptr);
output0_tensor->set_data(nullptr);
@@ -500,7 +500,7 @@ TEST_F(TestArithmeticSelfInt8, abs_quant1_thread2) {
std::vector<int8_t> except_result = {1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6};
PrintData("output data", output, output_size);
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
- CompareOutputData(output, except_result.data(), output_size, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(output, except_result.data(), output_size, 0.000001));

input_tensor1->set_data(nullptr);
output0_tensor->set_data(nullptr);
@@ -560,7 +560,7 @@ TEST_F(TestArithmeticSelfInt8, sin_quant0_thread2) {
std::vector<int8_t> except_result = {1, 1, 0, -1};
PrintData("output data", output, output_size);
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
- CompareOutputData(output, except_result.data(), output_size, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(output, except_result.data(), output_size, 0.000001));

input_tensor1->set_data(nullptr);
output0_tensor->set_data(nullptr);
@@ -620,7 +620,7 @@ TEST_F(TestArithmeticSelfInt8, cos_quant0_thread2) {
std::vector<int8_t> except_result = {1, 0, -1, -1};
PrintData("output data", output, output_size);
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
- CompareOutputData(output, except_result.data(), output_size, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(output, except_result.data(), output_size, 0.000001));

input_tensor1->set_data(nullptr);
output0_tensor->set_data(nullptr);
@@ -680,7 +680,7 @@ TEST_F(TestArithmeticSelfInt8, log_quant0_thread2) {
std::vector<int8_t> except_result = {0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2};
PrintData("output data", output, output_size);
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
- CompareOutputData(output, except_result.data(), output_size, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(output, except_result.data(), output_size, 0.000001));

input_tensor1->set_data(nullptr);
output0_tensor->set_data(nullptr);
@@ -740,7 +740,7 @@ TEST_F(TestArithmeticSelfInt8, sqrt_quant0_thread2) {
std::vector<int8_t> except_result = {1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3};
PrintData("output data", output, output_size);
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
- CompareOutputData(output, except_result.data(), output_size, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(output, except_result.data(), output_size, 0.000001));

input_tensor1->set_data(nullptr);
output0_tensor->set_data(nullptr);
@@ -800,7 +800,7 @@ TEST_F(TestArithmeticSelfInt8, rsqrt_quant0_thread2) {
std::vector<int8_t> except_result = {1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0};
PrintData("output data", output, output_size);
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
- CompareOutputData(output, except_result.data(), output_size, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(output, except_result.data(), output_size, 0.000001));

input_tensor1->set_data(nullptr);
output0_tensor->set_data(nullptr);
@@ -860,7 +860,7 @@ TEST_F(TestArithmeticSelfInt8, square_quant0_thread2) {
std::vector<int8_t> except_result = {1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121, 127};
PrintData("output data", output, output_size);
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
- CompareOutputData(output, except_result.data(), output_size, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(output, except_result.data(), output_size, 0.000001));

input_tensor1->set_data(nullptr);
output0_tensor->set_data(nullptr);
@@ -920,7 +920,7 @@ TEST_F(TestArithmeticSelfInt8, square_quant1_thread2) {
std::vector<int8_t> except_result = {1, 2, 4, 7, 11, 16, 21, 28, 35, 43, 52, 62};
PrintData("output data", output, output_size);
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
- CompareOutputData(output, except_result.data(), output_size, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(output, except_result.data(), output_size, 0.000001));

input_tensor1->set_data(nullptr);
output0_tensor->set_data(nullptr);
@@ -980,7 +980,7 @@ TEST_F(TestArithmeticSelfInt8, logical_not_quant0_thread2) {
std::vector<int8_t> except_result = {0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1};
PrintData("output data", output, output_size);
PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
- CompareOutputData(output, except_result.data(), output_size, 0.000001);
+ ASSERT_EQ(0, CompareOutputData(output, except_result.data(), output_size, 0.000001));
|
||||
|
||||
input_tensor1->set_data(nullptr);
|
||||
output0_tensor->set_data(nullptr);
|
||||
|
|
|
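Every hunk above makes the same change: the comparison helper's return value is now checked with ASSERT_EQ(0, ...), so an accuracy mismatch fails the gtest case instead of only printing a message. A minimal sketch of the kind of helper these asserts assume is shown below; the template form, parameter names, and tolerance handling are assumptions for illustration, not the repository's actual implementation.

#include <cmath>
#include <cstddef>

// Hypothetical sketch only: returns 0 when every element of `output` matches `expect`
// within `tolerance`, and 1 otherwise. Returning an int (rather than printing and
// returning void) is what lets each test wrap the call in ASSERT_EQ(0, ...).
template <typename T>
int CompareOutputData(const T *output, const T *expect, size_t num, float tolerance) {
  for (size_t i = 0; i < num; ++i) {
    float diff = std::fabs(static_cast<float>(output[i]) - static_cast<float>(expect[i]));
    if (diff > tolerance) {
      return 1;  // first out-of-tolerance element fails the whole comparison
    }
  }
  return 0;  // all elements are within tolerance
}
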
@@ -116,7 +116,7 @@ TEST_F(TestBatchnormInt8, FusedTest) {
 printf("%d, ", output[i]);
 }
 std::cout << std::endl;
-CompareOutputData(output.data(), corr_out.data(), output0_tensor.ElementsNum(), 0.001);
+ASSERT_EQ(0, CompareOutputData(output.data(), corr_out.data(), output0_tensor.ElementsNum(), 0.001));

 input0_tensor.set_data(nullptr);
 input1_tensor.set_data(nullptr);

@@ -197,7 +197,7 @@ TEST_F(TestBatchnormInt8, BNTest) {
 printf("%d, ", output[i]);
 }
 std::cout << std::endl;
-CompareOutputData(output.data(), corr_out.data(), output0_tensor.ElementsNum(), 0.001);
+ASSERT_EQ(0, CompareOutputData(output.data(), corr_out.data(), output0_tensor.ElementsNum(), 0.001));

 input0_tensor.set_data(nullptr);
 input1_tensor.set_data(nullptr);

@@ -92,7 +92,7 @@ TEST_F(TestConcatInt8, Concat1_axis0) {

 std::vector<int8_t> except_result = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
 PrintData("output data", output, input1.size() + input2.size());
-CompareOutputData(output, except_result.data(), input1.size() + input2.size(), 0.000001);
+ASSERT_EQ(0, CompareOutputData(output, except_result.data(), input1.size() + input2.size(), 0.000001));
 input_tensor1->set_data(nullptr);
 input_tensor2->set_data(nullptr);
 output0_tensor->set_data(nullptr);

@@ -164,7 +164,7 @@ TEST_F(TestConcatInt8, Concat1_axis1_thread2) {

 std::vector<int8_t> except_result = {10, 11, 12, 13, 14, 15, 30, 31, 20, 21, 22, 23, 24, 25, 32, 33};
 PrintData("output data", output, input1.size() + input2.size());
-CompareOutputData(output, except_result.data(), input1.size() + input2.size(), 0.000001);
+ASSERT_EQ(0, CompareOutputData(output, except_result.data(), input1.size() + input2.size(), 0.000001));

 input_tensor1->set_data(nullptr);
 input_tensor2->set_data(nullptr);

@@ -237,7 +237,7 @@ TEST_F(TestConcatInt8, Concat1_axis1_thread2_quant1) {

 std::vector<int8_t> except_result = {5, 6, 6, 7, 7, 8, 15, 16, 10, 11, 11, 12, 12, 13, 16, 17};
 PrintData("output data", output, input1.size() + input2.size());
-CompareOutputData(output, except_result.data(), input1.size() + input2.size(), 0.000001);
+ASSERT_EQ(0, CompareOutputData(output, except_result.data(), input1.size() + input2.size(), 0.000001));

 input_tensor1->set_data(nullptr);
 input_tensor2->set_data(nullptr);

@@ -42,7 +42,7 @@ TEST_F(TestConv1x1Int8, Input1x1PrePack1) {
   1, -1, 41, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
 int8_t out[54] = {0};
 Conv1x1InputPack(in, out, conv_param, sizeof(int8_t));
-CompareOutputData(out, correct, 54, 0);
+ASSERT_EQ(0, CompareOutputData(out, correct, 54, 0));
 delete conv_param;
 }

@@ -65,7 +65,7 @@ TEST_F(TestConv1x1Int8, Input1x1PrePack2) {

 int8_t out[20] = {0};
 Conv1x1InputPack(in, out, conv_param, sizeof(int8_t));
-CompareOutputData(out, correct, 20, 0);
+ASSERT_EQ(0, CompareOutputData(out, correct, 20, 0));
 delete conv_param;
 }

@@ -130,7 +130,7 @@ TEST_F(TestConv1x1Int8, Conv1x1TestPerChannel) {

 conv1x1->Init();
 conv1x1->Run();
-CompareOutputData(reinterpret_cast<int8_t *>(outputs_[0]->MutableData()), correct, total_size, 70);
+ASSERT_EQ(0, CompareOutputData(reinterpret_cast<int8_t *>(outputs_[0]->MutableData()), correct, total_size, 70));

 delete conv1x1;
 for (auto t : inputs_) delete t;

@@ -199,7 +199,7 @@ TEST_F(TestConv1x1Int8, Conv1x1Int8Test1) {

 conv1x1->Init();
 conv1x1->Run();
-CompareOutputData(reinterpret_cast<int8_t *>(outputs_[0]->MutableData()), correct, total_size, 2);
+ASSERT_EQ(0, CompareOutputData(reinterpret_cast<int8_t *>(outputs_[0]->MutableData()), correct, total_size, 2));

 delete conv1x1;
 for (auto t : inputs_) delete t;

@@ -271,12 +271,12 @@ TEST_F(TestConv1x1Int8, Conv1x1Int8Test2) {
 ctx->thread_num_ = 1;
 ASSERT_EQ(lite::RET_OK, ctx->Init());
 int total_size = Conv1x1Int8TestInit2(&inputs_, &outputs_, conv_param, &correct);
-kernel::Convolution1x1Int8CPUKernel *conv1x1 = new kernel::Convolution1x1Int8CPUKernel(
-  reinterpret_cast<OpParameter *>(conv_param), inputs_, outputs_, ctx, nullptr);
+auto *conv1x1 = new kernel::Convolution1x1Int8CPUKernel(reinterpret_cast<OpParameter *>(conv_param), inputs_,
+                                                        outputs_, ctx, nullptr);

 conv1x1->Init();
 conv1x1->Run();
-CompareOutputData(reinterpret_cast<int8_t *>(outputs_[0]->MutableData()), correct, total_size, 2);
+ASSERT_EQ(0, CompareOutputData(reinterpret_cast<int8_t *>(outputs_[0]->MutableData()), correct, total_size, 2));

 delete conv1x1;
 for (auto t : inputs_) delete t;

@@ -85,7 +85,7 @@ TEST_F(TestCropInt8, crop_1d_axis0_offset0_quant0_thread2) {
 std::vector<int8_t> except_result = {2, 3, 4, 5, 6, 7, 8};
 PrintData("output data", output, output_size);
 PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
-CompareOutputData(output, except_result.data(), output_size, 0.000001);
+ASSERT_EQ(0, CompareOutputData(output, except_result.data(), output_size, 0.000001));

 input_tensor1->set_data(nullptr);
 output0_tensor->set_data(nullptr);

@@ -149,7 +149,7 @@ TEST_F(TestCropInt8, crop_2d_axis1_offset0_quant0_thread2) {
 std::vector<int8_t> except_result = {2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15, 16};
 PrintData("output data", output, output_size);
 PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
-CompareOutputData(output, except_result.data(), output_size, 0.000001);
+ASSERT_EQ(0, CompareOutputData(output, except_result.data(), output_size, 0.000001));

 input_tensor1->set_data(nullptr);
 output0_tensor->set_data(nullptr);

@@ -213,7 +213,7 @@ TEST_F(TestCropInt8, crop_3d_axis1_offset0_quant0_thread0) {
 std::vector<int8_t> except_result = {4, 8};
 PrintData("output data", output, output_size);
 PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
-CompareOutputData(output, except_result.data(), output_size, 0.000001);
+ASSERT_EQ(0, CompareOutputData(output, except_result.data(), output_size, 0.000001));

 input_tensor1->set_data(nullptr);
 output0_tensor->set_data(nullptr);

@@ -278,7 +278,7 @@ TEST_F(TestCropInt8, crop_3d_axis1_offset0_quant0_thread2) {
 std::vector<int8_t> except_result = {4, 6, 8, 10, 12, 14, 16, 20, 22, 24, 26, 28, 30, 32};
 PrintData("output data", output, output_size);
 PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
-CompareOutputData(output, except_result.data(), output_size, 0.000001);
+ASSERT_EQ(0, CompareOutputData(output, except_result.data(), output_size, 0.000001));

 input_tensor1->set_data(nullptr);
 output0_tensor->set_data(nullptr);

@@ -342,7 +342,7 @@ TEST_F(TestCropInt8, crop_4d_axis0_offset0_quant0_thread0) {
 std::vector<int8_t> except_result = {16};
 PrintData("output data", output, output_size);
 PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
-CompareOutputData(output, except_result.data(), output_size, 0.000001);
+ASSERT_EQ(0, CompareOutputData(output, except_result.data(), output_size, 0.000001));

 input_tensor1->set_data(nullptr);
 output0_tensor->set_data(nullptr);

@@ -406,7 +406,7 @@ TEST_F(TestCropInt8, crop_4d_axis1_offset0_quant0_thread0) {
 std::vector<int8_t> except_result = {8, 16};
 PrintData("output data", output, output_size);
 PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
-CompareOutputData(output, except_result.data(), output_size, 0.000001);
+ASSERT_EQ(0, CompareOutputData(output, except_result.data(), output_size, 0.000001));

 input_tensor1->set_data(nullptr);
 output0_tensor->set_data(nullptr);

@@ -473,7 +473,7 @@ TEST_F(TestCropInt8, crop_4d_axis1_offset1_quant0_thread0) {
 std::vector<int8_t> except_result = {13, 14, 15, 16};
 PrintData("output data", output, output_size);
 PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
-CompareOutputData(output, except_result.data(), output_size, 0.000001);
+ASSERT_EQ(0, CompareOutputData(output, except_result.data(), output_size, 0.000001));

 input_tensor1->set_data(nullptr);
 output0_tensor->set_data(nullptr);

@@ -540,7 +540,7 @@ TEST_F(TestCropInt8, crop_4d_axis1_offset1_quant1_thread0) {
 std::vector<int8_t> except_result = {7, 7, 8, 8};
 PrintData("output data", output, output_size);
 PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
-CompareOutputData(output, except_result.data(), output_size, 0.000001);
+ASSERT_EQ(0, CompareOutputData(output, except_result.data(), output_size, 0.000001));

 input_tensor1->set_data(nullptr);
 output0_tensor->set_data(nullptr);

@@ -606,7 +606,7 @@ TEST_F(TestCropInt8, crop_4d_axis0_offset0_quant0_thread2) {
 std::vector<int8_t> except_result = {40, 44, 48, 52, 56, 60, 64};
 PrintData("output data", output, output_size);
 PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
-CompareOutputData(output, except_result.data(), output_size, 0.000001);
+ASSERT_EQ(0, CompareOutputData(output, except_result.data(), output_size, 0.000001));

 input_tensor1->set_data(nullptr);
 output0_tensor->set_data(nullptr);

@@ -672,7 +672,7 @@ TEST_F(TestCropInt8, crop_4d_axis0_offset0_quant0_thread3) {
 std::vector<int8_t> except_result = {40, 44, 48, 52, 56, 60, 64};
 PrintData("output data", output, output_size);
 PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
-CompareOutputData(output, except_result.data(), output_size, 0.000001);
+ASSERT_EQ(0, CompareOutputData(output, except_result.data(), output_size, 0.000001));

 input_tensor1->set_data(nullptr);
 output0_tensor->set_data(nullptr);

@@ -48,7 +48,7 @@ TEST_F(TestDeconvInt8, PackWeight1) {
 int8_t dst[80] = {0};
 /*5*1*2*6 nhwc*/
 PackNHWCToC8HWN8Int8(in, dst, 5, 2, 6);
-CompareOutputData(dst, co, 80, 1);
+ASSERT_EQ(0, CompareOutputData(dst, co, 80, 1));
 }

 TEST_F(TestDeconvInt8, PackWeight2) {

@@ -105,7 +105,7 @@ TEST_F(TestDeconvInt8, PackWeight2) {
   46, 121, 66, 92, 0, 0, 0, 0};
 int8_t dst[528] = {0};
 PackNHWCToC8HWN8Int8(in, dst, 22, 1, 20);
-CompareOutputData(dst, co, 528, 1);
+ASSERT_EQ(0, CompareOutputData(dst, co, 528, 1));
 }

 TEST_F(TestDeconvInt8, PackInputTest1) {

@@ -131,7 +131,7 @@ TEST_F(TestDeconvInt8, PackInputTest1) {
   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
 int8_t dst[8 * 32] = {0};
 RowMajor2Row16x4MajorInt8(in, dst, 6, 20);
-CompareOutputData(dst, co, 8 * 32, 1);
+ASSERT_EQ(0, CompareOutputData(dst, co, 8 * 32, 1));
 }

 TEST_F(TestDeconvInt8, InputSumTest1) {

@@ -150,12 +150,12 @@ TEST_F(TestDeconvInt8, InputSumTest1) {
 int32_t input_sum[12] = {0};
 int32_t correct_input_sum[] = {-7100, -4780, 580, -4880, -9460, -1420, -3120, -3260, -1840, -6960, -4800, -4800};
 DeConvPackInputSum(packed_a, input_sum, filter_zp, 12, 16, true);
-CompareOutputData(input_sum, correct_input_sum, 12, 0);
+ASSERT_EQ(0, CompareOutputData(input_sum, correct_input_sum, 12, 0));

 int32_t input_sum_4[4] = {0};
 int32_t correct_input_sum_4[] = {-18400, -13160, -7340, -12940};
 DeConvPackInputSum(packed_a, input_sum_4, filter_zp, 4, 16 * 3, true);
-CompareOutputData(input_sum_4, correct_input_sum_4, 4, 0);
+ASSERT_EQ(0, CompareOutputData(input_sum_4, correct_input_sum_4, 4, 0));
 }

 TEST_F(TestDeconvInt8, MatMulOptTest1) {

@@ -196,7 +196,7 @@ TEST_F(TestDeconvInt8, MatMulOptTest1) {
   55, 10, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
   15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15};
 RowMajor2Row16x4MajorInt8(a_src_ptr, packed_a, 10, 12);
-CompareOutputData(packed_a, correct_packed_a, 16 * 12, 0);
+ASSERT_EQ(0, CompareOutputData(packed_a, correct_packed_a, 16 * 12, 0));

 /*
  * ---------------------- pack weight ------------------------- */

@@ -224,14 +224,14 @@ TEST_F(TestDeconvInt8, MatMulOptTest1) {
   -20, -20, -20, -20, -20, -20};
 DeConvWeightTransInt8(b_src_ptr, packed_b, 12, 6, 3, true);
 /* kernel : 12x1x3x6 nhwc */
-CompareOutputData(packed_b, correct_packed_b, 16 * 3 * 8, 0);
+ASSERT_EQ(0, CompareOutputData(packed_b, correct_packed_b, 16 * 3 * 8, 0));

 /*
  * ---------------------- calculate input_sum ------------------------- */
 int32_t input_sum[12] = {0};
 int32_t correct_input_sum[] = {-7100, -4780, 580, -4880, -9460, -1420, -3120, -3260, -1840, -6960, -4800, -4800};
 DeConvPackInputSum(packed_a, input_sum, filter_zp, 12, 16, true);
-CompareOutputData(input_sum, correct_input_sum, 12, 0);
+ASSERT_EQ(0, CompareOutputData(input_sum, correct_input_sum, 12, 0));

 /*
  * ---------------------- calculate weight_sum ------------------------- */

@@ -239,7 +239,7 @@ TEST_F(TestDeconvInt8, MatMulOptTest1) {
 int32_t correct_weight_sum[] = {-7395, -8265, -3090, -435, -5655, -1035, 0, 0, 1695, -4770, -6630, 300,
                                 -765, -2835, 0, 0, -7395, 4665, -2475, -4170, -2880, -1110, 0, 0};
 DeConvPackWeightSum(packed_b, weight_sum, input_zp, filter_zp, 16, 24, true);
-CompareOutputData(weight_sum, correct_weight_sum, 3 * 8, 0);
+ASSERT_EQ(0, CompareOutputData(weight_sum, correct_weight_sum, 3 * 8, 0));

 /*
  * ---------------------- do matmul ------------------------- */

@@ -268,36 +268,36 @@ TEST_F(TestDeconvInt8, MatMulOptTest1) {
   0, 0, 0, 0, 0, 0, 0, 0};

 MatMulInt8_16x4(packed_a, packed_b, tmp_output, 12, 24, 16, input_sum, weight_sum);
-CompareOutputData(tmp_output, correct_tmp_output, 12 * 3 * 8, 0);
+ASSERT_EQ(0, CompareOutputData(tmp_output, correct_tmp_output, 12 * 3 * 8, 0));
 }

 int DeConvInt8TestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor *> *outputs_,
                         ConvParameter *conv_param, int8_t **correct) {
 /* float data from deconv fp32 testcase : DeConvTestInit2 */
 /* vq = (vi - zp) * s vi = vq / s + zp */
-Tensor *in_t = new Tensor(kNumberTypeInt8, {1, 4, 2, 3}, Format_NHWC, lite::Tensor::Category::VAR);
+auto *in_t = new Tensor(kNumberTypeInt8, {1, 4, 2, 3}, Format_NHWC, lite::Tensor::Category::VAR);
 in_t->MallocData();
 int8_t in[] = {6, 43, 38, 24, -8, 12, 41, -24, -20, 41, -19, -6, -26, -6, 23, -31, 34, 45, 8, 45, -39, -27, -48, 12};
 memcpy(in_t->MutableData(), in, sizeof(int8_t) * in_t->ElementsNum());
-QuantArg *in_quant_arg = new QuantArg();
+auto *in_quant_arg = new QuantArg();
 in_quant_arg->zeroPoint = -19, in_quant_arg->scale = 0.31228156;
 in_t->AddQuantParam(*in_quant_arg);
 inputs_->push_back(in_t);

-Tensor *weight_t = new Tensor(kNumberTypeInt8, {3, 3, 3, 2}, Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
+auto *weight_t = new Tensor(kNumberTypeInt8, {3, 3, 3, 2}, Format_NHWC, lite::Tensor::Category::CONST_TENSOR);
 weight_t->MallocData();
 int8_t weight[] = {66, 89, 98, 74, 95, 86, 125, 95, 105, 83, 116, 94, 90, 80, 86, 59, 72, 92,
                    64, 76, 92, 80, 90, 87, 106, 55, 105, 60, 75, 53, 81, 81, 98, 81, 86, 59,
                    74, 82, 97, 105, 71, 67, 79, 87, 72, 79, 80, 76, 96, 80, 83, 71, 61, 79};
 memcpy(weight_t->MutableData(), weight, sizeof(int8_t) * weight_t->ElementsNum());
-QuantArg *w_quant_arg = new QuantArg();
+auto *w_quant_arg = new QuantArg();
 w_quant_arg->zeroPoint = 83, w_quant_arg->scale = 0.023649725490196;
 weight_t->AddQuantParam(*w_quant_arg);
 inputs_->push_back(weight_t);

-Tensor *out_t = new Tensor(kNumberTypeInt8, {1, 7, 3, 2}, Format_NHWC, lite::Tensor::Category::VAR);
+auto *out_t = new Tensor(kNumberTypeInt8, {1, 7, 3, 2}, Format_NHWC, lite::Tensor::Category::VAR);
 out_t->MallocData();
-QuantArg *out_quant_arg = new QuantArg();
+auto *out_quant_arg = new QuantArg();
 out_quant_arg->zeroPoint = 31, out_quant_arg->scale = 0.3439215686275;
 out_t->AddQuantParam(*out_quant_arg);
 outputs_->push_back(out_t);

@@ -318,17 +318,17 @@ TEST_F(TestDeconvInt8, DeConvInt8Test1) {
 std::vector<lite::Tensor *> inputs_;
 std::vector<lite::Tensor *> outputs_;
 auto deconv_param = new ConvParameter();
-lite::InnerContext *ctx = new lite::InnerContext;
+auto *ctx = new lite::InnerContext;
 ctx->thread_num_ = 1;
 ASSERT_EQ(lite::RET_OK, ctx->Init());
 int8_t *correct;
 int total_size = DeConvInt8TestInit1(&inputs_, &outputs_, deconv_param, &correct);
-mindspore::kernel::DeConvInt8CPUKernel *deconv = new mindspore::kernel::DeConvInt8CPUKernel(
-  reinterpret_cast<OpParameter *>(deconv_param), inputs_, outputs_, ctx, nullptr);
+auto *deconv = new mindspore::kernel::DeConvInt8CPUKernel(reinterpret_cast<OpParameter *>(deconv_param), inputs_,
+                                                          outputs_, ctx, nullptr);

 deconv->Init();
 deconv->Run();
-CompareOutputData(reinterpret_cast<int8_t *>(outputs_[0]->MutableData()), correct, total_size, 3);
+ASSERT_EQ(0, CompareOutputData(reinterpret_cast<int8_t *>(outputs_[0]->MutableData()), correct, total_size, 3));

 delete deconv_param;
 delete deconv;

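Besides the assert change, the conv1x1 and deconv hunks above also replace spelled-out pointer types at new-expressions with auto *; the object and its ownership are unchanged, only the redundant type name on the left-hand side goes away. A generic illustration using only standard-library types (not the kernel classes from the diff):

#include <string>
#include <vector>

int main() {
  // Before: the long type name is written twice.
  std::vector<std::string> *names_old = new std::vector<std::string>();
  // After: `auto *` deduces the same pointer type from the new-expression.
  auto *names_new = new std::vector<std::string>();
  delete names_old;
  delete names_new;
  return 0;
}
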
@@ -147,7 +147,7 @@ TEST_F(TestFcInt8, fctest1) {
 QuantProcess(correct, out_params.len, out_params.min, out_params.max, &out_scale, &out_zp, nullptr);
 float *out = new float[out_params.len];
 Dequantize(reinterpret_cast<int8_t *>(outputs[0]->MutableData()), outputs[0]->ElementsNum(), out_scale, out_zp, out);
-CompareOutputData(out, correct, 6, 0.3);
+ASSERT_EQ(0, CompareOutputData(out, correct, 6, 0.3));
 delete fc;
 for (auto t : inputs) delete t;
 for (auto t : outputs) delete t;

@@ -92,7 +92,7 @@ TEST_F(TestGatherNdInt8, GatherNdTest) {
 printf("%d, ", output[i]);
 }
 std::cout << std::endl;
-CompareOutputData(output.data(), corr_out.data(), output0_tensor.ElementsNum(), 0.001);
+ASSERT_EQ(0, CompareOutputData(output.data(), corr_out.data(), output0_tensor.ElementsNum(), 0.001));

 input0_tensor.set_data(nullptr);
 input1_tensor.set_data(nullptr);

@@ -90,7 +90,7 @@ TEST_F(TestGatherInt8, GatherTest) {
 printf("%d, ", output[i]);
 }
 std::cout << std::endl;
-CompareOutputData(output.data(), corr_out.data(), output0_tensor.ElementsNum(), 0.001);
+ASSERT_EQ(0, CompareOutputData(output.data(), corr_out.data(), output0_tensor.ElementsNum(), 0.001));

 input0_tensor.set_data(nullptr);
 input1_tensor.set_data(nullptr);

@@ -126,7 +126,7 @@ TEST_F(TestMatmulInt8, simple) {
 MatMulInt8_16x4_r(a_r4x16, b_c16x4, output, ROW, COL, DEPTH16, COL, a_sums, bias, &ls, &rs, &multiplier, 0, INT8_MIN,
                   INT8_MAX, false);
 #endif
-CompareOutputData(output, correct, ROW * COL, 0.1);
+ASSERT_EQ(0, CompareOutputData(output, correct, ROW * COL, 0.1));
 delete[] a_r4x16;
 delete[] b_c16x4;
 }

@@ -187,7 +187,7 @@ TEST_F(TestMatmulInt8, mmtest1) {
 QuantProcess(correct, out_params.len, out_params.min, out_params.max, &out_scale, &out_zp, nullptr);
 float *out = new float[out_params.len];
 Dequantize(reinterpret_cast<int8_t *>(outputs[0]->MutableData()), outputs[0]->ElementsNum(), out_scale, out_zp, out);
-CompareOutputData(out, correct, 6, 0.3);
+ASSERT_EQ(0, CompareOutputData(out, correct, 6, 0.3));
 delete mm;
 for (auto t : inputs) delete t;
 for (auto t : outputs) delete t;

@@ -304,7 +304,7 @@ TEST_F(TestMatmulInt8, mmtest2) {
 QuantProcess(correct, out_params.len, out_params.min, out_params.max, &out_scale, &out_zp, nullptr);
 float *out = new float[out_params.len];
 Dequantize(reinterpret_cast<int8_t *>(outputs[0]->MutableData()), outputs[0]->ElementsNum(), out_scale, out_zp, out);
-CompareOutputData(out, correct, 6, 0.6);
+ASSERT_EQ(0, CompareOutputData(out, correct, 6, 0.6));
 delete mm;
 for (auto t : inputs) delete t;
 for (auto t : outputs) delete t;

@@ -91,7 +91,7 @@ TEST_F(TestMulInt8, Mul_quant0) {

 std::vector<int8_t> except_result = {1, 4, 3, 8, 5, 12, 21, 32, 27, 40, 33, 48};
 PrintData("output data", output, input1.size());
-CompareOutputData(output, except_result.data(), input1.size(), 0.000001);
+ASSERT_EQ(0, CompareOutputData(output, except_result.data(), input1.size(), 0.000001));
 input_tensor1->set_data(nullptr);
 input_tensor2->set_data(nullptr);
 output0_tensor->set_data(nullptr);

@@ -162,7 +162,7 @@ TEST_F(TestMulInt8, Mul_quant0_thread0) {

 std::vector<int8_t> except_result = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18};
 PrintData("output data", output, input1.size());
-CompareOutputData(output, except_result.data(), input1.size(), 0.000001);
+ASSERT_EQ(0, CompareOutputData(output, except_result.data(), input1.size(), 0.000001));
 input_tensor1->set_data(nullptr);
 input_tensor2->set_data(nullptr);
 output0_tensor->set_data(nullptr);

@@ -233,7 +233,7 @@ TEST_F(TestMulInt8, Mul_quant1) {

 std::vector<int8_t> except_result = {1, 2, 2, 4, 3, 6, 11, 16, 14, 20, 17, 24};
 PrintData("output data", output, input1.size());
-CompareOutputData(output, except_result.data(), input1.size(), 0.000001);
+ASSERT_EQ(0, CompareOutputData(output, except_result.data(), input1.size(), 0.000001));
 input_tensor1->set_data(nullptr);
 input_tensor2->set_data(nullptr);
 output0_tensor->set_data(nullptr);

@@ -304,7 +304,7 @@ TEST_F(TestMulInt8, Mul_quant1_thread1) {

 std::vector<int8_t> except_result = {1, 2, 2, 4, 3, 6, 11, 16, 14, 20, 17, 24};
 PrintData("output data", output, input1.size());
-CompareOutputData(output, except_result.data(), input1.size(), 0.000001);
+ASSERT_EQ(0, CompareOutputData(output, except_result.data(), input1.size(), 0.000001));
 input_tensor1->set_data(nullptr);
 input_tensor2->set_data(nullptr);
 output0_tensor->set_data(nullptr);

@@ -375,7 +375,7 @@ TEST_F(TestMulInt8, test) {

 std::vector<int8_t> except_result = {1, 4, 9, 16, 25, 36, 7, 16, 27, 40, 55, 72};
 PrintData("output data", output, input1.size());
-CompareOutputData(output, except_result.data(), input1.size(), 0.000001);
+ASSERT_EQ(0, CompareOutputData(output, except_result.data(), input1.size(), 0.000001));
 input_tensor1->set_data(nullptr);
 input_tensor2->set_data(nullptr);
 output0_tensor->set_data(nullptr);

@@ -74,7 +74,7 @@ TEST_F(TestPadInt8, PadInt8Test1) {

 pad->Init();
 pad->Run();
-CompareOutputData(reinterpret_cast<int8_t *>(outputs_[0]->MutableData()), correct, total_size, 0);
+ASSERT_EQ(0, CompareOutputData(reinterpret_cast<int8_t *>(outputs_[0]->MutableData()), correct, total_size, 0));

 delete pad_param;
 delete pad;

@@ -127,7 +127,7 @@ TEST_F(TestPadInt8, PadInt8Test2) {

 pad->Init();
 pad->Run();
-CompareOutputData(reinterpret_cast<int8_t *>(outputs_[0]->MutableData()), correct, total_size, 0);
+ASSERT_EQ(0, CompareOutputData(reinterpret_cast<int8_t *>(outputs_[0]->MutableData()), correct, total_size, 0));

 delete pad_param;
 delete pad;

@@ -195,7 +195,7 @@ TEST_F(TestPadInt8, PadInt8TestInit4) {

 pad->Init();
 pad->Run();
-CompareOutputData(reinterpret_cast<int8_t *>(outputs_[0]->MutableData()), correct, total_size, 0);
+ASSERT_EQ(0, CompareOutputData(reinterpret_cast<int8_t *>(outputs_[0]->MutableData()), correct, total_size, 0));

 delete pad_param;
 delete pad;

@@ -79,7 +79,7 @@ TEST_F(TestPowerInt8, PowerInt8) {
 kernel->Run();

 std::vector<int8_t> except_result = {-112, -65, 15, 127};
-CompareOutputData(output.data(), except_result.data(), input.size(), 0.000001);
+ASSERT_EQ(0, CompareOutputData(output.data(), except_result.data(), input.size(), 0.000001));

 input0_tensor.set_data(nullptr);
 output0_tensor.set_data(nullptr);

@@ -148,7 +148,7 @@ TEST_F(TestPowerInt8, normal) {
 kernel->Run();

 std::vector<int8_t> except_result = {-99, 95, 124, -14};
-CompareOutputData(output.data(), except_result.data(), input.size(), 0.000001);
+ASSERT_EQ(0, CompareOutputData(output.data(), except_result.data(), input.size(), 0.000001));

 input0_tensor.set_data(nullptr);
 output0_tensor.set_data(nullptr);

@@ -85,7 +85,7 @@ TEST_F(TestPreluInt8, prelu_1) {
 std::vector<int8_t> except_result = {1, -1, 3, 4, 5, 6, 7, -2};
 PrintData("output data", output, output_size);
 PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
-CompareOutputData(output, except_result.data(), output_size, 0.000001);
+ASSERT_EQ(0, CompareOutputData(output, except_result.data(), output_size, 0.000001));

 input_tensor1->set_data(nullptr);
 output0_tensor->set_data(nullptr);

@@ -77,7 +77,7 @@ TEST_F(QuantDTypeCastTestFp32, QuantDTypeCastTest1) {
 std::cout << output[i] << " ";
 }
 std::cout << "\n";
-CompareOutputData(output.data(), expect_out, out_size, 0.000001);
+ASSERT_EQ(0, CompareOutputData(output.data(), expect_out, out_size, 0.000001));
 }

 TEST_F(QuantDTypeCastTestFp32, QuantDTypeCastTest2) {

@@ -124,6 +124,6 @@ TEST_F(QuantDTypeCastTestFp32, QuantDTypeCastTest2) {
 std::cout << output[i] << " ";
 }
 std::cout << "\n";
-CompareOutputData(output.data(), expect_out, out_size, 0.000001);
+ASSERT_EQ(0, CompareOutputData(output.data(), expect_out, out_size, 0.000001));
 }
 } // namespace mindspore

@@ -81,7 +81,7 @@ TEST_F(TestReshapeInt8, reshape_quant0) {
 std::vector<int8_t> except_result = {10, 11, 12, 13, 14, 15, 20, 21, 22, 23, 24, 25};
 PrintData("output data", output, input1.size());
 PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
-CompareOutputData(output, except_result.data(), input1.size(), 0.000001);
+ASSERT_EQ(0, CompareOutputData(output, except_result.data(), input1.size(), 0.000001));

 input_tensor1->set_data(nullptr);
 output0_tensor->set_data(nullptr);

@@ -141,7 +141,7 @@ TEST_F(TestReshapeInt8, reshape_quant1_thread2) {
 std::vector<int8_t> except_result = {6, 7, 7, 8, 8, 9, 11, 12, 12, 13, 13, 14};
 PrintData("output data", output, input1.size());
 PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
-CompareOutputData(output, except_result.data(), input1.size(), 0.000001);
+ASSERT_EQ(0, CompareOutputData(output, except_result.data(), input1.size(), 0.000001));

 input_tensor1->set_data(nullptr);
 output0_tensor->set_data(nullptr);

@@ -84,7 +84,7 @@ TEST_F(TestSoftmaxInt8, SoftmaxInt8) {

 std::vector<int8_t> except_result = {-126, -126, -124, -124, -123, -124, -116, -116, 122, 122, 112, 112,
                                      -127, -127, -127, -127, -59, -59, -61, -59, 58, 58, 59, 58};
-CompareOutputData(output.data(), except_result.data(), input.size(), 0.000001);
+ASSERT_EQ(0, CompareOutputData(output.data(), except_result.data(), input.size(), 0.000001));

 input0_tensor.set_data(nullptr);
 output0_tensor.set_data(nullptr);

@@ -100,8 +100,8 @@ TEST_F(TestSplitInt8, Split_quant0_thread2) {
 PrintData("output data shape", output1_tensor_shape.data(), output1_tensor_shape.size());
 PrintData("output data", output2, output2_size);
 PrintData("output data shape", output2_tensor_shape.data(), output2_tensor_shape.size());
-CompareOutputData(output1, except_result1.data(), output1_size, 0.000001);
-CompareOutputData(output2, except_result2.data(), output2_size, 0.000001);
+ASSERT_EQ(0, CompareOutputData(output1, except_result1.data(), output1_size, 0.000001));
+ASSERT_EQ(0, CompareOutputData(output2, except_result2.data(), output2_size, 0.000001));

 input_tensor1->set_data(nullptr);
 output1_tensor->set_data(nullptr);

@@ -194,9 +194,9 @@ TEST_F(TestSplitInt8, Split_quant0_thread2_num) {
 PrintData("output data shape", output2_tensor_shape.data(), output2_tensor_shape.size());
 PrintData("output data", output3, output3_size);
 PrintData("output data shape", output3_tensor_shape.data(), output3_tensor_shape.size());
-CompareOutputData(output1, except_result1.data(), output1_size, 0.000001);
-CompareOutputData(output2, except_result2.data(), output2_size, 0.000001);
-CompareOutputData(output3, except_result3.data(), output3_size, 0.000001);
+ASSERT_EQ(0, CompareOutputData(output1, except_result1.data(), output1_size, 0.000001));
+ASSERT_EQ(0, CompareOutputData(output2, except_result2.data(), output2_size, 0.000001));
+ASSERT_EQ(0, CompareOutputData(output3, except_result3.data(), output3_size, 0.000001));

 input_tensor1->set_data(nullptr);
 output1_tensor->set_data(nullptr);

@@ -291,9 +291,9 @@ TEST_F(TestSplitInt8, Split_quant1_thread2_num) {
 PrintData("output data shape", output2_tensor_shape.data(), output2_tensor_shape.size());
 PrintData("output data", output3, output3_size);
 PrintData("output data shape", output3_tensor_shape.data(), output3_tensor_shape.size());
-CompareOutputData(output1, except_result1.data(), output1_size, 0.000001);
-CompareOutputData(output2, except_result2.data(), output2_size, 0.000001);
-CompareOutputData(output3, except_result3.data(), output3_size, 0.000001);
+ASSERT_EQ(0, CompareOutputData(output1, except_result1.data(), output1_size, 0.000001));
+ASSERT_EQ(0, CompareOutputData(output2, except_result2.data(), output2_size, 0.000001));
+ASSERT_EQ(0, CompareOutputData(output3, except_result3.data(), output3_size, 0.000001));

 input_tensor1->set_data(nullptr);
 output1_tensor->set_data(nullptr);

@@ -85,7 +85,7 @@ TEST_F(TestSqueezeInt8, Squeeze_1d_axis0_offset0_quant0_thread2) {
 std::vector<int8_t> except_result = {1, 2, 3, 4, 5, 6, 7, 8};
 PrintData("output data", output, output_size);
 PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
-CompareOutputData(output, except_result.data(), output_size, 0.000001);
+ASSERT_EQ(0, CompareOutputData(output, except_result.data(), output_size, 0.000001));

 input_tensor1->set_data(nullptr);
 output0_tensor->set_data(nullptr);

@@ -84,7 +84,7 @@ TEST_F(TestUnsqueezeInt8, Unsqueeze_1) {
 std::vector<int8_t> except_result = {1, 2, 3, 4, 5, 6, 7, 8};
 PrintData("output data", output, output_size);
 PrintData("output data shape", output_tensor_shape.data(), output_tensor_shape.size());
-CompareOutputData(output, except_result.data(), output_size, 0.000001);
+ASSERT_EQ(0, CompareOutputData(output, except_result.data(), output_size, 0.000001));

 input_tensor1->set_data(nullptr);
 output0_tensor->set_data(nullptr);

@@ -141,7 +141,7 @@ TEST_F(TestBatchnormOpenCLCI, Batchnormfp32CI) {
 sub_graph->Run();

 auto *output_data_gpu = reinterpret_cast<float *>(output_tensor->data_c());
-CompareOutputData(output_data_gpu, correct_data, output_tensor->ElementsNum(), 0.0001);
+ASSERT_EQ(0, CompareOutputData(output_data_gpu, correct_data, output_tensor->ElementsNum(), 0.0001));
 for (auto tensor : inputs) {
 tensor->set_data(nullptr);
 delete tensor;

@@ -260,7 +260,7 @@ TEST_F(TestBatchnormOpenCLfp16, Batchnormfp16input_dim4) {
 sub_graph->Run();

 auto *output_data_gpu = reinterpret_cast<float16_t *>(output_tensor->data_c());
-CompareOutputData(output_data_gpu, correct_data, output_tensor->ElementsNum(), 0.01);
+ASSERT_EQ(0, CompareOutputData(output_data_gpu, correct_data, output_tensor->ElementsNum(), 0.01));
 for (auto tensor : inputs) {
 tensor->set_data(nullptr);
 delete tensor;

@@ -378,7 +378,7 @@ TEST_F(TestBatchnormOpenCLfp32, Batchnormfp32input_dim4) {
 sub_graph->Run();

 auto *output_data_gpu = reinterpret_cast<float *>(output_tensor->data_c());
-CompareOutputData(output_data_gpu, correct_data, output_tensor->ElementsNum(), 0.0001);
+ASSERT_EQ(0, CompareOutputData(output_data_gpu, correct_data, output_tensor->ElementsNum(), 0.0001));
 for (auto tensor : inputs) {
 tensor->set_data(nullptr);
 delete tensor;

@@ -124,7 +124,7 @@ TEST_F(TestConcatOpenCLCI, ConcatFp32_2inputforCI) {
 std::cout << "==================output data================" << std::endl;
 sub_graph->Run();
 auto *output_data_gpu = reinterpret_cast<float *>(output_tensor->data_c());
-CompareOutputData(output_data_gpu, correctOutput, output_tensor->ElementsNum(), 0.00001);
+ASSERT_EQ(0, CompareOutputData(output_data_gpu, correctOutput, output_tensor->ElementsNum(), 0.00001));
 for (auto tensor : inputs) {
 tensor->set_data(nullptr);
 delete tensor;

@@ -252,7 +252,7 @@ TEST_F(TestConcatOpenCLfp16, ConcatFp16_4input_dim4_axis1) {
 std::cout << "==================output data================" << std::endl;
 sub_graph->Run();
 auto *output_data_gpu = reinterpret_cast<float16_t *>(output_tensor->data_c());
-CompareOutputData(output_data_gpu, correctOutput, output_tensor->ElementsNum(), 0.000001);
+ASSERT_EQ(0, CompareOutputData(output_data_gpu, correctOutput, output_tensor->ElementsNum(), 0.000001));
 for (auto tensor : inputs) {
 tensor->set_data(nullptr);
 delete tensor;

@@ -371,7 +371,7 @@ TEST_F(TestConcatOpenCLfp32, ConcatFp32_3input_dim4_axis1) {
 std::cout << "==================output data================" << std::endl;
 sub_graph->Run();
 auto *output_data_gpu = reinterpret_cast<float *>(output_tensor->data_c());
-CompareOutputData(output_data_gpu, correctOutput, output_tensor->ElementsNum(), 0.00001);
+ASSERT_EQ(0, CompareOutputData(output_data_gpu, correctOutput, output_tensor->ElementsNum(), 0.00001));
 for (auto tensor : inputs) {
 tensor->set_data(nullptr);
 delete tensor;

@@ -510,7 +510,7 @@ TEST_F(TestConcatOpenCLfp16, ConcatFp16_6input_dim4_axis1) {
 std::cout << "==================output data================" << std::endl;
 sub_graph->Run();
 auto *output_data_gpu = reinterpret_cast<float16_t *>(output_tensor->MutableData());
-CompareOutputData(output_data_gpu, correctOutput, output_tensor->ElementsNum(), 0.000001);
+ASSERT_EQ(0, CompareOutputData(output_data_gpu, correctOutput, output_tensor->ElementsNum(), 0.000001));
 for (auto tensor : inputs) {
 tensor->set_data(nullptr);
 delete tensor;

@@ -83,7 +83,7 @@ TEST_F(TestFillOpenCLCI, Fp32testfill) {
 std::cout << "==================output data================" << std::endl;
 sub_graph->Run();
 auto *output_data_gpu = reinterpret_cast<float *>(output_tensor.data_c());
-CompareOutputData(output_data_gpu, correctOutput, output_tensor.ElementsNum(), 0.0001);
+ASSERT_EQ(0, CompareOutputData(output_data_gpu, correctOutput, output_tensor.ElementsNum(), 0.0001));
 delete sub_graph;
 }

@@ -139,7 +139,7 @@ TEST_F(TestFillOpenCLCI, Fp32testshape) {
 std::cout << "==================output data================" << std::endl;
 sub_graph->Run();
 auto *output_data_gpu = reinterpret_cast<float *>(output_tensor.data_c());
-CompareOutputData(output_data_gpu, correctOutput, output_tensor.ElementsNum(), 0.0001);
+ASSERT_EQ(0, CompareOutputData(output_data_gpu, correctOutput, output_tensor.ElementsNum(), 0.0001));
 delete sub_graph;
 }
 } // namespace mindspore

@@ -94,7 +94,7 @@ TEST_F(TestSwishOpenCLCI, Fp32CI) {
 std::cout << "==================output data================" << std::endl;
 sub_graph->Run();
 auto *output_data_gpu = reinterpret_cast<float *>(output_tensor.data_c());
-CompareOutputData(output_data_gpu, correctOutput, output_tensor.ElementsNum(), 0.0001);
+ASSERT_EQ(0, CompareOutputData(output_data_gpu, correctOutput, output_tensor.ElementsNum(), 0.0001));
 delete sub_graph;
 }
 } // namespace mindspore

@@ -69,8 +69,8 @@ void TEST_MAIN(PadParameter *param, Format input_format, Format output_format, F
 sub_graph->Init();
 memcpy(input.data_c(), input_data, input.Size());
 sub_graph->Run();
-if (lite::CompareOutputData(reinterpret_cast<float *>(output.data_c()), output.ElementsNum(),
-                            const_cast<float *>(expect_data), output.ElementsNum())) {
+if (CommonTest::CompareOutputData(reinterpret_cast<float *>(output.data_c()), const_cast<float *>(expect_data),
+                                  static_cast<size_t>(output.ElementsNum()))) {
 FAIL();
 } else {
 std::cout << "COMPARE SUCCESS!\n";

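The OpenCL pad test above is the one place where the helper is consumed as an if-condition rather than through ASSERT_EQ; both styles rely on the same 0-on-success convention. A short sketch of the two idioms side by side (the float overload and the buffers here are placeholders, not the test's real data):

#include <cmath>
#include <cstddef>
#include <iostream>
#include <gtest/gtest.h>

// Placeholder float overload, assumed to return 0 on success and nonzero on mismatch.
static int CompareOutputData(const float *output, const float *expect, size_t num) {
  for (size_t i = 0; i < num; ++i) {
    if (std::fabs(output[i] - expect[i]) > 1e-5f) return 1;
  }
  return 0;
}

TEST(CompareStyleSketch, BothStylesAgree) {
  const float out[] = {1.0f, 2.0f};
  const float expect[] = {1.0f, 2.0f};
  ASSERT_EQ(0, CompareOutputData(out, expect, 2));  // style used by the int8 kernel tests
  if (CompareOutputData(out, expect, 2)) {          // style used by the OpenCL pad test
    FAIL();
  } else {
    std::cout << "COMPARE SUCCESS!\n";
  }
}
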
@@ -96,7 +96,7 @@ TEST_F(TestSparseToDenseOpenCLCI, Fp32Dim2Scalar) {
 std::cout << "==================output data================" << std::endl;
 sub_graph->Run();
 auto *output_data_gpu = reinterpret_cast<float *>(output_tensor.data_c());
-CompareOutputData(output_data_gpu, correctOutput, output_tensor.ElementsNum(), 0.0001);
+ASSERT_EQ(0, CompareOutputData(output_data_gpu, correctOutput, output_tensor.ElementsNum(), 0.0001));
 delete sub_graph;
 }

@@ -168,7 +168,7 @@ TEST_F(TestSparseToDenseOpenCLCI, Fp32Dim2Vector) {
 std::cout << "==================output data================" << std::endl;
 sub_graph->Run();
 auto *output_data_gpu = reinterpret_cast<float *>(output_tensor.data_c());
-CompareOutputData(output_data_gpu, correctOutput, output_tensor.ElementsNum(), 0.0001);
+ASSERT_EQ(0, CompareOutputData(output_data_gpu, correctOutput, output_tensor.ElementsNum(), 0.0001));
 delete sub_graph;
 }

@@ -239,7 +239,7 @@ TEST_F(TestSparseToDenseOpenCLCI, Fp32Dim2Shape1Vector) {
 std::cout << "==================output data================" << std::endl;
 sub_graph->Run();
 auto *output_data_gpu = reinterpret_cast<float *>(output_tensor.data_c());
-CompareOutputData(output_data_gpu, correctOutput, output_tensor.ElementsNum(), 0.0001);
+ASSERT_EQ(0, CompareOutputData(output_data_gpu, correctOutput, output_tensor.ElementsNum(), 0.0001));
 delete sub_graph;
 }

@@ -310,7 +310,7 @@ TEST_F(TestSparseToDenseOpenCLCI, Fp32Dim2Shape1Scalar) {
 std::cout << "==================output data================" << std::endl;
 sub_graph->Run();
 auto *output_data_gpu = reinterpret_cast<float *>(output_tensor.data_c());
-CompareOutputData(output_data_gpu, correctOutput, output_tensor.ElementsNum(), 0.0001);
+ASSERT_EQ(0, CompareOutputData(output_data_gpu, correctOutput, output_tensor.ElementsNum(), 0.0001));
 delete sub_graph;
 }

@@ -381,7 +381,7 @@ TEST_F(TestSparseToDenseOpenCLCI, Fp32Dim1Scalar) {
 std::cout << "==================output data================" << std::endl;
 sub_graph->Run();
 auto *output_data_gpu = reinterpret_cast<float *>(output_tensor.data_c());
-CompareOutputData(output_data_gpu, correctOutput, output_tensor.ElementsNum(), 0.0001);
+ASSERT_EQ(0, CompareOutputData(output_data_gpu, correctOutput, output_tensor.ElementsNum(), 0.0001));
 delete sub_graph;
 }

@@ -452,7 +452,7 @@ TEST_F(TestSparseToDenseOpenCLCI, Fp32Dim1Vector) {
 std::cout << "==================output data================" << std::endl;
 sub_graph->Run();
 auto *output_data_gpu = reinterpret_cast<float *>(output_tensor.data_c());
-CompareOutputData(output_data_gpu, correctOutput, output_tensor.ElementsNum(), 0.0001);
+ASSERT_EQ(0, CompareOutputData(output_data_gpu, correctOutput, output_tensor.ElementsNum(), 0.0001));
 delete sub_graph;
 }

@@ -135,7 +135,7 @@ TEST_F(TestStackOpenCLCI, StackFp32_8inputforCI) {
 std::cout << "==================output data================" << std::endl;
 sub_graph->Run();
 auto *output_data_gpu = reinterpret_cast<float *>(output_tensor->data_c());
-CompareOutputData(output_data_gpu, correctOutput, output_tensor->ElementsNum(), 0.00001);
+ASSERT_EQ(0, CompareOutputData(output_data_gpu, correctOutput, output_tensor->ElementsNum(), 0.00001));
 for (auto tensor : inputs) {
 tensor->set_data(nullptr);
 delete tensor;

@@ -268,7 +268,7 @@ TEST_F(TestStackOpenCLfp16, StackFp32_8inputaxis1) {
 std::cout << "==================output data================" << std::endl;
 sub_graph->Run();
 auto *output_data_gpu = reinterpret_cast<float16_t *>(output_tensor->MutableData());
-CompareOutputData(output_data_gpu, correctOutput, output_tensor->ElementsNum(), 0.000001);
+ASSERT_EQ(0, CompareOutputData(output_data_gpu, correctOutput, output_tensor->ElementsNum(), 0.000001));
 for (auto tensor : inputs) {
 tensor->set_data(nullptr);
 delete tensor;

@@ -100,7 +100,7 @@ TEST_F(TestToFormatOpenCL, ToFormatNHWC2NCHW) {
 std::cout << std::endl;

 // compare
-CompareOutputData(output_data, correct_data, h * w * c, 0.00001);
+ASSERT_EQ(0, CompareOutputData(output_data, correct_data, h * w * c, 0.00001));
 MS_LOG(INFO) << "Test TransposeFp32 passed";
 }
 } // namespace mindspore